Diffstat (limited to 'drivers')
437 files changed, 6732 insertions, 3591 deletions
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 127408069ca7..631b9477b99c 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab) static int erst_open_pstore(struct pstore_info *psi); static int erst_close_pstore(struct pstore_info *psi); static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, - struct timespec *time, struct pstore_info *psi); + struct timespec *time, char **buf, + struct pstore_info *psi); static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part, size_t size, struct pstore_info *psi); static int erst_clearer(enum pstore_type_id type, u64 id, @@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi) } static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, - struct timespec *time, struct pstore_info *psi) + struct timespec *time, char **buf, + struct pstore_info *psi) { int rc; ssize_t len = 0; u64 record_id; - struct cper_pstore_record *rcd = (struct cper_pstore_record *) - (erst_info.buf - sizeof(*rcd)); + struct cper_pstore_record *rcd; + size_t rcd_len = sizeof(*rcd) + erst_info.bufsize; if (erst_disable) return -ENODEV; + rcd = kmalloc(rcd_len, GFP_KERNEL); + if (!rcd) { + rc = -ENOMEM; + goto out; + } skip: rc = erst_get_record_id_next(&reader_pos, &record_id); if (rc) @@ -1004,22 +1011,27 @@ skip: /* no more record */ if (record_id == APEI_ERST_INVALID_RECORD_ID) { - rc = -1; + rc = -EINVAL; goto out; } - len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) + - erst_info.bufsize); + len = erst_read(record_id, &rcd->hdr, rcd_len); /* The record may be cleared by others, try read next record */ if (len == -ENOENT) goto skip; - else if (len < 0) { - rc = -1; + else if (len < sizeof(*rcd)) { + rc = -EIO; goto out; } if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) goto skip; + *buf = kmalloc(len, GFP_KERNEL); + if (*buf == NULL) { + rc = -ENOMEM; + goto out; + } + memcpy(*buf, rcd->data, len - sizeof(*rcd)); *id = record_id; if (uuid_le_cmp(rcd->sec_hdr.section_type, CPER_SECTION_TYPE_DMESG) == 0) @@ -1037,6 +1049,7 @@ skip: time->tv_nsec = 0; out: + kfree(rcd); return (rc < 0) ? 
rc : (len - sizeof(*rcd)); } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 73b2909dddfe..0e8e2de2ed3e 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, /* * Suspend / resume control */ -static int acpi_idle_suspend; static u32 saved_bm_rld; static void acpi_idle_bm_rld_save(void) @@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void) int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) { - if (acpi_idle_suspend == 1) - return 0; - acpi_idle_bm_rld_save(); - acpi_idle_suspend = 1; return 0; } int acpi_processor_resume(struct acpi_device * device) { - if (acpi_idle_suspend == 0) - return 0; - acpi_idle_bm_rld_restore(); - acpi_idle_suspend = 0; return 0; } @@ -763,13 +754,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, local_irq_disable(); - /* Do not access any ACPI IO ports in suspend path */ - if (acpi_idle_suspend) { - local_irq_enable(); - cpu_relax(); - return -EINVAL; - } - lapic_timer_state_broadcast(pr, cx, 1); kt1 = ktime_get_real(); acpi_idle_do_entry(cx); @@ -810,13 +794,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, local_irq_disable(); - if (acpi_idle_suspend) { - local_irq_enable(); - cpu_relax(); - return -EINVAL; - } - - if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -895,12 +872,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, if (unlikely(!pr)) return -EINVAL; - - if (acpi_idle_suspend) { - cpu_relax(); - return -EINVAL; - } - if (!cx->bm_sts_skip && acpi_idle_bm_check()) { if (drv->safe_state_index >= 0) { return drv->states[drv->safe_state_index].enter(dev, diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index fb7b90b05922..cf26222a93c5 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + /* Asmedia */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */ + /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 004f2ce3dc73..43b875810d1b 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -65,9 +65,9 @@ static struct scsi_host_template ahci_platform_sht = { static int __init ahci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct ahci_platform_data *pdata = dev->platform_data; + struct ahci_platform_data *pdata = dev_get_platdata(dev); const struct platform_device_id *id = platform_get_device_id(pdev); - struct ata_port_info pi = ahci_port_info[id->driver_data]; + struct ata_port_info pi = ahci_port_info[id ? 
id->driver_data : 0]; const struct ata_port_info *ppi[] = { &pi, NULL }; struct ahci_host_priv *hpriv; struct ata_host *host; @@ -191,7 +191,7 @@ err0: static int __devexit ahci_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct ahci_platform_data *pdata = dev->platform_data; + struct ahci_platform_data *pdata = dev_get_platdata(dev); struct ata_host *host = dev_get_drvdata(dev); ata_host_detach(host); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f22957c2769a..a9b282038000 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify, sata_scr_read(link, SCR_STATUS, &sstatus)) rc = -ERESTART; - if (rc == -ERESTART || try >= max_tries) { + if (try >= max_tries) { /* * Thaw host port even if reset failed, so that the port * can be retried on the next phy event. This risks @@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify, ata_eh_acquire(ap); } + /* + * While disks spinup behind PMP, some controllers fail sending SRST. + * They need to be reset - as well as the PMP - before retrying. + */ + if (rc == -ERESTART) { + if (ata_is_host_link(link)) + ata_eh_thaw_port(ap); + goto out; + } + if (try == max_tries - 1) { sata_down_spd_limit(link, 0); if (slave) diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 104462dbc524..21b80c555c60 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -389,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap) /* link reports offline after LPM */ link->flags |= ATA_LFLAG_NO_LPM; - /* Class code report is unreliable and SRST - * times out under certain configurations. - */ + /* Class code report is unreliable. */ if (link->pmp < 5) - link->flags |= ATA_LFLAG_NO_SRST | - ATA_LFLAG_ASSUME_ATA; + link->flags |= ATA_LFLAG_ASSUME_ATA; /* port 5 is for SEMB device and it doesn't like SRST */ if (link->pmp == 5) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 72a9770ac42f..2a5412e7e9c1 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1217,6 +1217,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) /** * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth + * @ap: ATA port to which the device change the queue depth + * @sdev: SCSI device to configure queue depth for + * @queue_depth: new queue depth + * @reason: calling context * * libsas and libata have different approaches for associating a sdev to * its ata_port. 
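For reference, a minimal sketch of the defensive probe pattern used in the ahci_platform.c hunk above: platform data is fetched through dev_get_platdata(), and a NULL return from platform_get_device_id() falls back to the first table entry. The example_* names and the example_pdata structure are hypothetical, not part of the patch.

#include <linux/module.h>
#include <linux/platform_device.h>

struct example_pdata {				/* hypothetical platform data */
	int flags;
};

static const unsigned long example_variants[] = { 0, 1 };

static int example_probe(struct platform_device *pdev)
{
	/* NULL when the board code supplied no platform data */
	struct example_pdata *pdata = dev_get_platdata(&pdev->dev);
	/* NULL when the device was matched by name, not by an id table */
	const struct platform_device_id *id = platform_get_device_id(pdev);
	unsigned long variant = example_variants[id ? id->driver_data : 0];

	if (!pdata)
		dev_info(&pdev->dev, "no platform data, using defaults\n");
	dev_info(&pdev->dev, "variant %lu\n", variant);
	return 0;
}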
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 63d53277d6a9..4cadfa28f940 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2533,10 +2533,12 @@ static int ata_pci_init_one(struct pci_dev *pdev, if (rc) goto out; +#ifdef CONFIG_ATA_BMDMA if (bmdma) /* prepare and activate BMDMA host */ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); else +#endif /* prepare and activate SFF host */ rc = ata_pci_sff_prepare_host(pdev, ppi, &host); if (rc) @@ -2544,10 +2546,12 @@ static int ata_pci_init_one(struct pci_dev *pdev, host->private_data = host_priv; host->flags |= hflags; +#ifdef CONFIG_ATA_BMDMA if (bmdma) { pci_set_master(pdev); rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); } else +#endif rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); out: if (rc == 0) diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index a72ab0dde4e5..2a472c5bb7db 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c @@ -52,7 +52,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev) } ret = of_irq_to_resource(dn, 0, &irq_res); - if (ret == NO_IRQ) + if (!ret) irq_res.start = irq_res.end = 0; else irq_res.flags = 0; diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 447d9c05fb5a..95ec435f0eb4 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = { }; MODULE_AUTHOR("Uwe Koziolek"); -MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller"); +MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sis_pci_tbl); MODULE_VERSION(DRV_VERSION); diff --git a/drivers/base/core.c b/drivers/base/core.c index 82c865452c70..919daa7cd5b1 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -22,6 +22,7 @@ #include <linux/kallsyms.h> #include <linux/mutex.h> #include <linux/async.h> +#include <linux/pm_runtime.h> #include "base.h" #include "power/power.h" @@ -1743,6 +1744,10 @@ void device_shutdown(void) list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); + /* Don't allow any more runtime suspends */ + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); + if (dev->bus && dev->bus->shutdown) { dev_dbg(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/node.c b/drivers/base/node.c index 793f796c4da3..5693ecee9a40 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_WRITEBACK)), nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), - nid, K(node_page_state(nid, NR_ANON_PAGES) #ifdef CONFIG_TRANSPARENT_HUGEPAGE + nid, K(node_page_state(nid, NR_ANON_PAGES) + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR + HPAGE_PMD_NR), +#else + nid, K(node_page_state(nid, NR_ANON_PAGES)), #endif - ), nid, K(node_page_state(nid, NR_SHMEM)), nid, node_page_state(nid, NR_KERNEL_STACK) * THREAD_SIZE / 1024, @@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) , nid, 
K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR) + HPAGE_PMD_NR)); +#else + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); #endif - ); n += hugetlb_report_node_meminfo(nid, buf + n); return n; } diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 5f0f85d5c576..428e55e012dc 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev) list_for_each_entry_reverse(ce, &psd->clock_list, node) { if (ce->status < PCE_STATUS_ERROR) { - clk_disable(ce->clk); + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); ce->status = PCE_STATUS_ACQUIRED; } } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 7fa098464dae..c3d2dfcf438d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -920,7 +920,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) End: if (!error) { dev->power.is_suspended = true; - if (dev->power.wakeup_path && dev->parent) + if (dev->power.wakeup_path + && dev->parent && !dev->parent->power.ignore_children) dev->parent->power.wakeup_path = true; } diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 434a6c011675..95706fa24c73 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev) struct device_opp *dev_opp = find_device_opp(dev); if (IS_ERR(dev_opp)) - return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ + return ERR_CAST(dev_opp); /* matching type */ return &dev_opp->head; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 30a94eadc200..86de6c50fc41 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -212,11 +212,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, if (!dev || !req) /*guard against callers passing in null */ return -EINVAL; - if (dev_pm_qos_request_active(req)) { - WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already " - "added request\n"); + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) return -EINVAL; - } req->dev = dev; @@ -271,11 +269,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, if (!req) /*guard against callers passing in null */ return -EINVAL; - if (!dev_pm_qos_request_active(req)) { - WARN(1, KERN_ERR "dev_pm_qos_update_request() called for " - "unknown object\n"); + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) return -EINVAL; - } mutex_lock(&dev_pm_qos_mtx); @@ -312,11 +308,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) if (!req) /*guard against callers passing in null */ return -EINVAL; - if (!dev_pm_qos_request_active(req)) { - WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for " - "unknown object\n"); + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) return -EINVAL; - } mutex_lock(&dev_pm_qos_mtx); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 486f94ef24d4..8004ac30a7a8 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -24,6 +24,7 @@ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> @@ -4319,6 +4320,10 @@ static int __devinit cciss_pci_init(ctlr_info_t *h) dev_warn(&h->pdev->dev, "controller 
appears to be disabled\n"); return -ENODEV; } + + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + err = pci_enable_device(h->pdev); if (err) { dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); @@ -5158,6 +5163,7 @@ reinit_after_soft_reset: h->cciss_max_sectors = 8192; rebuild_lun_table(h, 1, 0); + cciss_engage_scsi(h); h->busy_initializing = 0; return 1; diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 951a4e33b92b..e820b68d2f6c 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -1720,5 +1720,6 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) /* If no tape support, then these become defined out of existence */ #define cciss_scsi_setup(cntl_num) +#define cciss_engage_scsi(h) #endif /* CONFIG_CISS_SCSI_TAPE */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3d806820280e..68b205a9338f 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -161,17 +161,19 @@ static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { &xor_funcs }; -static loff_t get_loop_size(struct loop_device *lo, struct file *file) +static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file) { - loff_t size, offset, loopsize; + loff_t size, loopsize; /* Compute loopsize in bytes */ size = i_size_read(file->f_mapping->host); - offset = lo->lo_offset; loopsize = size - offset; - if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) - loopsize = lo->lo_sizelimit; + /* offset is beyond i_size, wierd but possible */ + if (loopsize < 0) + return 0; + if (sizelimit > 0 && sizelimit < loopsize) + loopsize = sizelimit; /* * Unfortunately, if we want to do I/O on the device, * the number of 512-byte sectors has to fit into a sector_t. 
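The size arithmetic that the refactored get_size() helper above centralises can be checked in isolation; the stand-alone sketch below (user-space C, hypothetical names) mirrors it: backing-file size minus offset, clamped at zero, optionally capped by sizelimit, then converted to 512-byte sectors.

#include <stdio.h>

/* stand-in for the kernel's loff_t */
typedef long long loff_example_t;

static loff_example_t example_get_size(loff_example_t file_size,
				       loff_example_t offset,
				       loff_example_t sizelimit)
{
	loff_example_t loopsize = file_size - offset;

	if (loopsize < 0)		/* offset beyond i_size: empty device */
		return 0;
	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	return loopsize >> 9;		/* bytes -> 512-byte sectors */
}

int main(void)
{
	/* 1 MiB backing file, 4 KiB offset, no limit -> 2040 sectors */
	printf("%lld\n", example_get_size(1 << 20, 4096, 0));
	return 0;
}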
@@ -179,17 +181,25 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file) return loopsize >> 9; } +static loff_t get_loop_size(struct loop_device *lo, struct file *file) +{ + return get_size(lo->lo_offset, lo->lo_sizelimit, file); +} + static int -figure_loop_size(struct loop_device *lo) +figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) { - loff_t size = get_loop_size(lo, lo->lo_backing_file); + loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); sector_t x = (sector_t)size; if (unlikely((loff_t)x != size)) return -EFBIG; - + if (lo->lo_offset != offset) + lo->lo_offset = offset; + if (lo->lo_sizelimit != sizelimit) + lo->lo_sizelimit = sizelimit; set_capacity(lo->lo_disk, x); - return 0; + return 0; } static inline int @@ -372,7 +382,8 @@ do_lo_receive(struct loop_device *lo, if (retval < 0) return retval; - + if (retval != bvec->bv_len) + return -EIO; return 0; } @@ -1058,9 +1069,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { - lo->lo_offset = info->lo_offset; - lo->lo_sizelimit = info->lo_sizelimit; - if (figure_loop_size(lo)) + if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) return -EFBIG; } loop_config_discard(lo); @@ -1246,7 +1255,7 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) err = -ENXIO; if (unlikely(lo->lo_state != Lo_bound)) goto out; - err = figure_loop_size(lo); + err = figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); if (unlikely(err)) goto out; sec = get_capacity(lo->lo_disk); @@ -1284,13 +1293,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, goto out_unlocked; break; case LOOP_SET_STATUS: - err = loop_set_status_old(lo, (struct loop_info __user *) arg); + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + err = loop_set_status_old(lo, + (struct loop_info __user *)arg); break; case LOOP_GET_STATUS: err = loop_get_status_old(lo, (struct loop_info __user *) arg); break; case LOOP_SET_STATUS64: - err = loop_set_status64(lo, (struct loop_info64 __user *) arg); + err = -EPERM; + if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) + err = loop_set_status64(lo, + (struct loop_info64 __user *) arg); break; case LOOP_GET_STATUS64: err = loop_get_status64(lo, (struct loop_info64 __user *) arg); diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index 6b9a2000d56a..a79fb4f7ff62 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -630,6 +630,7 @@ static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t if (dev->status & 0x10) return -ETIME; + memset(&hdr, 0, sizeof(hdr)); hdr.magic = PG_MAGIC; hdr.dlen = dev->dlen; copy = 0; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f9b726091ad0..fe4ebc375b3d 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -100,6 +100,9 @@ static struct usb_device_id btusb_table[] = { /* Canyon CN-BTU1 with HID interfaces */ { USB_DEVICE(0x0c10, 0x0000) }, + /* Broadcom BCM20702A0 */ + { USB_DEVICE(0x413c, 0x8197) }, + { } /* Terminating entry */ }; diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 66cd0b8096ca..c92424ca1a55 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1186,10 +1186,11 @@ static void gen6_cleanup(void) /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is 
enabled. */ -extern int intel_iommu_gfx_mapped; static inline int needs_idle_maps(void) { +#ifdef CONFIG_INTEL_IOMMU const unsigned short gpu_devid = intel_private.pcidev->device; + extern int intel_iommu_gfx_mapped; /* Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. @@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void) gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && intel_iommu_gfx_mapped) return 1; - +#endif return 0; } @@ -1236,7 +1237,7 @@ static int i9xx_setup(void) intel_private.gtt_bus_addr = reg_addr + gtt_offset; } - if (needs_idle_maps()); + if (needs_idle_maps()) intel_private.base.do_idle_maps = 1; intel_i9xx_setup_flush(); diff --git a/drivers/char/random.c b/drivers/char/random.c index 63e19ba56bbe..6035ab8d5ef7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -941,7 +941,7 @@ void get_random_bytes(void *buf, int nbytes) if (!arch_get_random_long(&v)) break; - memcpy(buf, &v, chunk); + memcpy(p, &v, chunk); p += chunk; nbytes -= chunk; } diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c index edaa987621ea..f5002015d82e 100644 --- a/drivers/cpufreq/db8500-cpufreq.c +++ b/drivers/cpufreq/db8500-cpufreq.c @@ -109,7 +109,7 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu) static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) { - int res; + int i, res; BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); @@ -120,8 +120,8 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) freq_table[3].frequency = 1000000; } pr_info("db8500-cpufreq : Available frequencies:\n"); - while (freq_table[i].frequency != CPUFREQ_TABLE_END) - pr_info(" %d Mhz\n", freq_table[i++].frequency/1000); + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) + pr_info(" %d Mhz\n", freq_table[i].frequency/1000); /* get policy fields based on the table */ res = cpufreq_frequency_table_cpuinfo(policy, freq_table); diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 5c6f56f21443..dcd8babae9eb 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -343,11 +343,13 @@ static void mv_process_hash_current(int first_block) else op.config |= CFG_MID_FRAG; - writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); - writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); - writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); - writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); - writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); + if (first_block) { + writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); + writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); + writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); + writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); + writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); + } } memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 643b055ed3cd..8f0491037080 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -1,36 +1,29 @@ -config ARCH_HAS_DEVFREQ - bool - depends on ARCH_HAS_OPP - help - Denotes that the architecture supports DEVFREQ. If the architecture - supports multiple OPP entries per device and the frequency of the - devices with OPPs may be altered dynamically, the architecture - supports DEVFREQ. 
- menuconfig PM_DEVFREQ bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support" - depends on PM_OPP && ARCH_HAS_DEVFREQ help - With OPP support, a device may have a list of frequencies and - voltages available. DEVFREQ, a generic DVFS framework can be - registered for a device with OPP support in order to let the - governor provided to DEVFREQ choose an operating frequency - based on the OPP's list and the policy given with DEVFREQ. + A device may have a list of frequencies and voltages available. + devfreq, a generic DVFS framework can be registered for a device + in order to let the governor provided to devfreq choose an + operating frequency based on the device driver's policy. - Each device may have its own governor and policy. DEVFREQ can + Each device may have its own governor and policy. Devfreq can reevaluate the device state periodically and/or based on the - OPP list changes (each frequency/voltage pair in OPP may be - disabled or enabled). + notification to "nb", a notifier block, of devfreq. - Like some CPUs with CPUFREQ, a device may have multiple clocks. + Like some CPUs with CPUfreq, a device may have multiple clocks. However, because the clock frequencies of a single device are - determined by the single device's state, an instance of DEVFREQ + determined by the single device's state, an instance of devfreq is attached to a single device and returns a "representative" - clock frequency from the OPP of the device, which is also attached - to a device by 1-to-1. The device registering DEVFREQ takes the - responsiblity to "interpret" the frequency listed in OPP and + clock frequency of the device, which is also attached + to a device by 1-to-1. The device registering devfreq takes the + responsiblity to "interpret" the representative frequency and to set its every clock accordingly with the "target" callback - given to DEVFREQ. + given to devfreq. + + When OPP is used with the devfreq device, it is recommended to + register devfreq's nb to the OPP's notifier head. If OPP is + used with the devfreq device, you may use OPP helper + functions defined in devfreq.h. 
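To make the "representative frequency" idea in the help text above concrete, here is a hypothetical target-style callback; the structure and signature are assumptions for illustration, not the devfreq API itself. The driver receives one frequency and programs each of the device's clocks from it.

#include <linux/clk.h>
#include <linux/device.h>

struct example_dvfs {			/* hypothetical per-device state */
	struct clk *core_clk;
	struct clk *bus_clk;
};

static int example_target(struct example_dvfs *d, unsigned long freq)
{
	int ret;

	/* the representative frequency drives the core clock directly */
	ret = clk_set_rate(d->core_clk, freq);
	if (ret)
		return ret;

	/* every other clock is derived from it; here the bus runs at half rate */
	return clk_set_rate(d->bus_clk, freq / 2);
}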
if PM_DEVFREQ diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 5d15b812377b..59d24e9cb8c5 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -15,7 +15,9 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/slab.h> +#include <linux/stat.h> #include <linux/opp.h> #include <linux/devfreq.h> #include <linux/workqueue.h> @@ -416,10 +418,14 @@ out: */ int devfreq_remove_device(struct devfreq *devfreq) { + bool central_polling; + if (!devfreq) return -EINVAL; - if (!devfreq->governor->no_central_polling) { + central_polling = !devfreq->governor->no_central_polling; + + if (central_polling) { mutex_lock(&devfreq_list_lock); while (wait_remove_device == devfreq) { mutex_unlock(&devfreq_list_lock); @@ -431,7 +437,7 @@ int devfreq_remove_device(struct devfreq *devfreq) mutex_lock(&devfreq->lock); _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */ - if (!devfreq->governor->no_central_polling) + if (central_polling) mutex_unlock(&devfreq_list_lock); return 0; diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 8af8e864a9cf..73464a62adf7 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -1128,7 +1128,7 @@ static struct of_device_id mpc85xx_mc_err_of_match[] = { { .compatible = "fsl,p1020-memory-controller", }, { .compatible = "fsl,p1021-memory-controller", }, { .compatible = "fsl,p2020-memory-controller", }, - { .compatible = "fsl,p4080-memory-controller", }, + { .compatible = "fsl,qoriq-memory-controller", }, {}, }; MODULE_DEVICE_TABLE(of, mpc85xx_mc_err_of_match); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index bcb1126e3d00..153980be4ee6 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -585,14 +585,12 @@ int dmi_name_in_serial(const char *str) } /** - * dmi_name_in_vendors - Check if string is anywhere in the DMI vendor information. 
+ * dmi_name_in_vendors - Check if string is in the DMI system or board vendor name * @str: Case sensitive Name */ int dmi_name_in_vendors(const char *str) { - static int fields[] = { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_SYS_VENDOR, - DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_BOARD_VENDOR, - DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_NONE }; + static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE }; int i; for (i = 0; fields[i] != DMI_NONE; i++) { int f = fields[i]; diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 8370f72d87ff..b0a81173a268 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi) } static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, - struct timespec *timespec, struct pstore_info *psi) + struct timespec *timespec, + char **buf, struct pstore_info *psi) { efi_guid_t vendor = LINUX_EFI_CRASH_GUID; struct efivars *efivars = psi->data; @@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, timespec->tv_nsec = 0; get_var_data_locked(efivars, &efivars->walk_entry->var); size = efivars->walk_entry->var.DataSize; - memcpy(psi->buf, efivars->walk_entry->var.Data, size); + *buf = kmalloc(size, GFP_KERNEL); + if (*buf == NULL) + return -ENOMEM; + memcpy(*buf, efivars->walk_entry->var.Data, + size); efivars->walk_entry = list_entry(efivars->walk_entry->list.next, struct efivar_entry, list); return size; @@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi) } static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, - struct timespec *time, struct pstore_info *psi) + struct timespec *timespec, + char **buf, struct pstore_info *psi) { return -1; } diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c index f10fc521951b..1eedb6f7fdab 100644 --- a/drivers/firmware/sigma.c +++ b/drivers/firmware/sigma.c @@ -14,13 +14,34 @@ #include <linux/module.h> #include <linux/sigma.h> -/* Return: 0==OK, <0==error, =1 ==no more actions */ +static size_t sigma_action_size(struct sigma_action *sa) +{ + size_t payload = 0; + + switch (sa->instr) { + case SIGMA_ACTION_WRITEXBYTES: + case SIGMA_ACTION_WRITESINGLE: + case SIGMA_ACTION_WRITESAFELOAD: + payload = sigma_action_len(sa); + break; + default: + break; + } + + payload = ALIGN(payload, 2); + + return payload + sizeof(struct sigma_action); +} + +/* + * Returns a negative error value in case of an error, 0 if processing of + * the firmware should be stopped after this action, 1 otherwise. 
+ */ static int -process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw) +process_sigma_action(struct i2c_client *client, struct sigma_action *sa) { - struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos); size_t len = sigma_action_len(sa); - int ret = 0; + int ret; pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__, sa->instr, sa->addr, len); @@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw) case SIGMA_ACTION_WRITEXBYTES: case SIGMA_ACTION_WRITESINGLE: case SIGMA_ACTION_WRITESAFELOAD: - if (ssfw->fw->size < ssfw->pos + len) - return -EINVAL; ret = i2c_master_send(client, (void *)&sa->addr, len); if (ret < 0) return -EINVAL; break; - case SIGMA_ACTION_DELAY: - ret = 0; udelay(len); len = 0; break; - case SIGMA_ACTION_END: - return 1; - + return 0; default: return -EINVAL; } - /* when arrive here ret=0 or sent data */ - ssfw->pos += sigma_action_size(sa, len); - return ssfw->pos == ssfw->fw->size; + return 1; } static int process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw) { - pr_debug("%s: processing %p\n", __func__, ssfw); + struct sigma_action *sa; + size_t size; + int ret; + + while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) { + sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos); + + size = sigma_action_size(sa); + ssfw->pos += size; + if (ssfw->pos > ssfw->fw->size || size == 0) + break; + + ret = process_sigma_action(client, sa); - while (1) { - int ret = process_sigma_action(client, ssfw); pr_debug("%s: action returned %i\n", __func__, ret); - if (ret == 1) - return 0; - else if (ret) + + if (ret <= 0) return ret; } + + if (ssfw->pos != ssfw->fw->size) + return -EINVAL; + + return 0; } int process_sigma_firmware(struct i2c_client *client, const char *name) @@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name) /* then verify the header */ ret = -EINVAL; - if (fw->size < sizeof(*ssfw_head)) + + /* + * Reject too small or unreasonable large files. The upper limit has been + * chosen a bit arbitrarily, but it should be enough for all practical + * purposes and having the limit makes it easier to avoid integer + * overflows later in the loading process. 
+ */ + if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000) goto done; ssfw_head = (void *)fw->data; if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic))) goto done; - crc = crc32(0, fw->data, fw->size); + crc = crc32(0, fw->data + sizeof(*ssfw_head), + fw->size - sizeof(*ssfw_head)); pr_debug("%s: crc=%x\n", __func__, crc); - if (crc != ssfw_head->crc) + if (crc != le32_to_cpu(ssfw_head->crc)) goto done; ssfw.pos = sizeof(*ssfw_head); diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index dbcb0bcfd8da..4e018d6a7639 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o -obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o +obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0e49d87f6c60..0b0562979171 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -148,13 +148,17 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio) return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0; } -#define MOD_REG_BIT(reg, bit_mask, set) \ -do { \ - int l = __raw_readl(base + reg); \ - if (set) l |= bit_mask; \ - else l &= ~bit_mask; \ - __raw_writel(l, base + reg); \ -} while(0) +static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) +{ + int l = __raw_readl(base + reg); + + if (set) + l |= mask; + else + l &= ~mask; + + __raw_writel(l, base + reg); +} /** * _set_gpio_debounce - low level gpio debounce time @@ -210,28 +214,28 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio, u32 gpio_bit = 1 << gpio; if (cpu_is_omap44xx()) { - MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, - trigger & IRQ_TYPE_LEVEL_LOW); - MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit, - trigger & IRQ_TYPE_LEVEL_HIGH); - MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit, - trigger & IRQ_TYPE_EDGE_RISING); - MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit, - trigger & IRQ_TYPE_EDGE_FALLING); + _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit, + trigger & IRQ_TYPE_LEVEL_LOW); + _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit, + trigger & IRQ_TYPE_LEVEL_HIGH); + _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit, + trigger & IRQ_TYPE_EDGE_RISING); + _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit, + trigger & IRQ_TYPE_EDGE_FALLING); } else { - MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, - trigger & IRQ_TYPE_LEVEL_LOW); - MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, - trigger & IRQ_TYPE_LEVEL_HIGH); - MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit, - trigger & IRQ_TYPE_EDGE_RISING); - MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, - trigger & IRQ_TYPE_EDGE_FALLING); + _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, + trigger & IRQ_TYPE_LEVEL_LOW); + _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, + trigger & IRQ_TYPE_LEVEL_HIGH); + _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit, + trigger & IRQ_TYPE_EDGE_RISING); + _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, + trigger & IRQ_TYPE_EDGE_FALLING); } if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { if (cpu_is_omap44xx()) { - MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit, - trigger != 0); + _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit, + trigger 
!= 0); } else { /* * GPIO wakeup request can only be generated on edge @@ -1086,6 +1090,11 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, handle_simple_irq); + if (!gc) { + dev_err(bank->dev, "Memory alloc failed for gc\n"); + return; + } + ct = gc->chip_types; /* NOTE: No ack required, reading IRQ status clears it. */ diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 0550dcb85814..d3f3e8f54561 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -546,7 +546,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip) * Translate OpenFirmware node properties into platform_data * WARNING: This is DEPRECATED and will be removed eventually! */ -void +static void pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) { struct device_node *node; @@ -574,7 +574,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) *invert = *val; } #else -void +static void pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) { *gpio_base = -1; @@ -596,9 +596,6 @@ static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert) /* set platform specific polarity inversion */ ret = pca953x_write_reg(chip, PCA953X_INVERT, invert); - if (ret) - goto out; - return 0; out: return ret; } @@ -640,7 +637,7 @@ static int __devinit pca953x_probe(struct i2c_client *client, struct pca953x_platform_data *pdata; struct pca953x_chip *chip; int irq_base=0, invert=0; - int ret = 0; + int ret; chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); if (chip == NULL) @@ -673,10 +670,10 @@ static int __devinit pca953x_probe(struct i2c_client *client, pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); if (chip->chip_type == PCA953X_TYPE) - device_pca953x_init(chip, invert); - else if (chip->chip_type == PCA957X_TYPE) - device_pca957x_init(chip, invert); + ret = device_pca953x_init(chip, invert); else + ret = device_pca957x_init(chip, invert); + if (ret) goto out_failed; ret = pca953x_irq_setup(chip, id, irq_base); diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 785127cb281b..1368826ef284 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -9,7 +9,6 @@ menuconfig DRM depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU select I2C select I2C_ALGOBIT - select SLOW_WORK help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. 
If you say Y here, you need to select @@ -96,6 +95,7 @@ config DRM_I915 select FB_CFB_IMAGEBLIT # i915 depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select BACKLIGHT_LCD_SUPPORT if ACPI select BACKLIGHT_CLASS_DEVICE if ACPI select VIDEO_OUTPUT_CONTROL if ACPI select INPUT if ACPI diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 9a2e2a14b3bb..8323fc389840 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, } if (num_clips && clips_ptr) { + if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { + ret = -EINVAL; + goto out_err1; + } clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); if (!clips) { ret = -ENOMEM; @@ -2118,8 +2122,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, property->num_values = num_values; INIT_LIST_HEAD(&property->enum_blob_list); - if (name) + if (name) { strncpy(property->name, name, DRM_PROP_NAME_LEN); + property->name[DRM_PROP_NAME_LEN-1] = '\0'; + } list_add_tail(&property->head, &dev->mode_config.property_list); return property; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 2957636161e8..d2619d72cece 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -456,6 +456,30 @@ done: EXPORT_SYMBOL(drm_crtc_helper_set_mode); +static int +drm_crtc_helper_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_connector *connector; + struct drm_encoder *encoder; + + /* Decouple all encoders and their attached connectors from this crtc */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc != crtc) + continue; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder != encoder) + continue; + + connector->encoder = NULL; + } + } + + drm_helper_disable_unused_functions(dev); + return 0; +} + /** * drm_crtc_helper_set_config - set a new config from userspace * @crtc: CRTC to setup @@ -484,6 +508,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) struct drm_connector *save_connectors, *connector; int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_mode_set save_set; int ret = 0; int i; @@ -509,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) (int)set->num_connectors, set->x, set->y); } else { DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); - set->mode = NULL; - set->num_connectors = 0; + return drm_crtc_helper_disable(set->crtc); } dev = set->crtc->dev; @@ -556,6 +580,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) save_connectors[count++] = *connector; } + save_set.crtc = set->crtc; + save_set.mode = &set->crtc->mode; + save_set.x = set->crtc->x; + save_set.y = set->crtc->y; + save_set.fb = set->crtc->fb; + /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ if (set->crtc->fb != set->fb) { @@ -721,6 +751,12 @@ fail: *connector = save_connectors[count++]; } + /* Try to restore the config */ + if (mode_changed && + !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, + save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); + kfree(save_connectors); kfree(save_encoders); kfree(save_crtcs); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 
d067c12ba940..1c7a1c0d3edd 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, tmp->minor = minor; tmp->dent = ent; tmp->info_ent = &files[i]; - list_add(&(tmp->list), &(minor->debugfs_nodes.list)); + + mutex_lock(&minor->debugfs_lock); + list_add(&tmp->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); } return 0; @@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, char name[64]; int ret; - INIT_LIST_HEAD(&minor->debugfs_nodes.list); + INIT_LIST_HEAD(&minor->debugfs_list); + mutex_init(&minor->debugfs_lock); sprintf(name, "%d", minor_id); minor->debugfs_root = debugfs_create_dir(name, root); if (!minor->debugfs_root) { @@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, struct drm_info_node *tmp; int i; + mutex_lock(&minor->debugfs_lock); for (i = 0; i < count; i++) { - list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { + list_for_each_safe(pos, q, &minor->debugfs_list) { tmp = list_entry(pos, struct drm_info_node, list); if (tmp->info_ent == &files[i]) { debugfs_remove(tmp->dent); @@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, } } } + mutex_unlock(&minor->debugfs_lock); return 0; } EXPORT_SYMBOL(drm_debugfs_remove_files); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index fc81af9dbf42..40c187c60f44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index cb3794a00f98..44a5d0ad8b7c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) /* Prevent vblank irq processing while disabling vblank irqs, * so no updates of timestamps or count can happen after we've * disabled. Needed to prevent races in case of delayed irq's. - * Disable preemption, so vblank_time_lock is held as short as - * possible, even under a kernel with PREEMPT_RT patches. */ - preempt_disable(); spin_lock_irqsave(&dev->vblank_time_lock, irqflags); dev->driver->disable_vblank(dev, crtc); @@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) clear_vblank_timestamps(dev, crtc); spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); - preempt_enable(); } static void vblank_disable_fn(unsigned long arg) @@ -407,13 +403,16 @@ int drm_irq_uninstall(struct drm_device *dev) /* * Wake up any waiters so they don't hang. 
*/ - spin_lock_irqsave(&dev->vbl_lock, irqflags); - for (i = 0; i < dev->num_crtcs; i++) { - DRM_WAKEUP(&dev->vbl_queue[i]); - dev->vblank_enabled[i] = 0; - dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); + if (dev->num_crtcs) { + spin_lock_irqsave(&dev->vbl_lock, irqflags); + for (i = 0; i < dev->num_crtcs; i++) { + DRM_WAKEUP(&dev->vbl_queue[i]); + dev->vblank_enabled[i] = 0; + dev->last_vblank[i] = + dev->driver->get_vblank_counter(dev, i); + } + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); if (!irq_enabled) return -EINVAL; @@ -886,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc) spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { - /* Disable preemption while holding vblank_time_lock. Do - * it explicitely to guard against PREEMPT_RT kernel. - */ - preempt_disable(); spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); if (!dev->vblank_enabled[crtc]) { /* Enable vblank irqs under vblank_time_lock protection. @@ -909,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc) } } spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); - preempt_enable(); } else { if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); @@ -1125,6 +1119,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, trace_drm_vblank_event_delivered(current->pid, pipe, vblwait->request.sequence); } else { + /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); vblwait->reply.sequence = vblwait->request.sequence; } @@ -1205,8 +1200,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data, goto done; } - if (flags & _DRM_VBLANK_EVENT) + if (flags & _DRM_VBLANK_EVENT) { + /* must hold on to the vblank ref until the event fires + * drm_vblank_put will be called asynchronously + */ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); + } if ((flags & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1<<23)) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c index 6f8afea94fc9..2bb07bca511a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c @@ -27,82 +27,84 @@ #include "drm.h" #include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" #include "exynos_drm_buf.h" -static DEFINE_MUTEX(exynos_drm_buf_lock); - static int lowlevel_buffer_allocate(struct drm_device *dev, - struct exynos_drm_buf_entry *entry) + struct exynos_drm_gem_buf *buffer) { DRM_DEBUG_KMS("%s\n", __FILE__); - entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size, - (dma_addr_t *)&entry->paddr, GFP_KERNEL); - if (!entry->paddr) { + buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size, + &buffer->dma_addr, GFP_KERNEL); + if (!buffer->kvaddr) { DRM_ERROR("failed to allocate buffer.\n"); return -ENOMEM; } - DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n", - (unsigned int)entry->vaddr, entry->paddr, entry->size); + DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", + (unsigned long)buffer->kvaddr, + (unsigned long)buffer->dma_addr, + buffer->size); return 0; } static void lowlevel_buffer_deallocate(struct drm_device *dev, - struct exynos_drm_buf_entry *entry) + struct exynos_drm_gem_buf *buffer) { DRM_DEBUG_KMS("%s.\n", __FILE__); - if (entry->paddr && entry->vaddr && entry->size) - 
dma_free_writecombine(dev->dev, entry->size, entry->vaddr, - entry->paddr); + if (buffer->dma_addr && buffer->size) + dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr, + (dma_addr_t)buffer->dma_addr); else - DRM_DEBUG_KMS("entry data is null.\n"); + DRM_DEBUG_KMS("buffer data are invalid.\n"); } -struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, +struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, unsigned int size) { - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; DRM_DEBUG_KMS("%s.\n", __FILE__); + DRM_DEBUG_KMS("desired size = 0x%x\n", size); - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { - DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n"); + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n"); return ERR_PTR(-ENOMEM); } - entry->size = size; + buffer->size = size; /* * allocate memory region with size and set the memory information - * to vaddr and paddr of a entry object. + * to vaddr and dma_addr of a buffer object. */ - if (lowlevel_buffer_allocate(dev, entry) < 0) { - kfree(entry); - entry = NULL; + if (lowlevel_buffer_allocate(dev, buffer) < 0) { + kfree(buffer); + buffer = NULL; return ERR_PTR(-ENOMEM); } - return entry; + return buffer; } void exynos_drm_buf_destroy(struct drm_device *dev, - struct exynos_drm_buf_entry *entry) + struct exynos_drm_gem_buf *buffer) { DRM_DEBUG_KMS("%s.\n", __FILE__); - if (!entry) { - DRM_DEBUG_KMS("entry is null.\n"); + if (!buffer) { + DRM_DEBUG_KMS("buffer is null.\n"); return; } - lowlevel_buffer_deallocate(dev, entry); + lowlevel_buffer_deallocate(dev, buffer); - kfree(entry); - entry = NULL; + kfree(buffer); + buffer = NULL; } MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h index 045d59eab01a..6e91f9caa5db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_buf.h +++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h @@ -26,28 +26,15 @@ #ifndef _EXYNOS_DRM_BUF_H_ #define _EXYNOS_DRM_BUF_H_ -/* - * exynos drm buffer entry structure. - * - * @paddr: physical address of allocated memory. - * @vaddr: kernel virtual address of allocated memory. - * @size: size of allocated memory. - */ -struct exynos_drm_buf_entry { - dma_addr_t paddr; - void __iomem *vaddr; - unsigned int size; -}; - /* allocate physical memory. */ -struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev, +struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev, unsigned int size); -/* get physical memory information of a drm framebuffer. */ -struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); +/* get memory information of a drm framebuffer. */ +struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb); /* remove allocated physical memory. 
*/ void exynos_drm_buf_destroy(struct drm_device *dev, - struct exynos_drm_buf_entry *entry); + struct exynos_drm_gem_buf *buffer); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index 985d9e768728..d620b0784257 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -37,6 +37,8 @@ struct exynos_drm_connector { struct drm_connector drm_connector; + uint32_t encoder_id; + struct exynos_drm_manager *manager; }; /* convert exynos_video_timings to drm_display_mode */ @@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode, DRM_DEBUG_KMS("%s\n", __FILE__); mode->clock = timing->pixclock / 1000; + mode->vrefresh = timing->refresh; mode->hdisplay = timing->xres; mode->hsync_start = mode->hdisplay + timing->left_margin; @@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode, mode->vsync_start = mode->vdisplay + timing->upper_margin; mode->vsync_end = mode->vsync_start + timing->vsync_len; mode->vtotal = mode->vsync_end + timing->lower_margin; + + if (timing->vmode & FB_VMODE_INTERLACED) + mode->flags |= DRM_MODE_FLAG_INTERLACE; + + if (timing->vmode & FB_VMODE_DOUBLE) + mode->flags |= DRM_MODE_FLAG_DBLSCAN; } /* convert drm_display_mode to exynos_video_timings */ @@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing, memset(timing, 0, sizeof(*timing)); timing->pixclock = mode->clock * 1000; - timing->refresh = mode->vrefresh; + timing->refresh = drm_mode_vrefresh(mode); timing->xres = mode->hdisplay; timing->left_margin = mode->hsync_start - mode->hdisplay; @@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing, static int exynos_drm_connector_get_modes(struct drm_connector *connector) { - struct exynos_drm_manager *manager = - exynos_drm_get_manager(connector->encoder); - struct exynos_drm_display *display = manager->display; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct exynos_drm_manager *manager = exynos_connector->manager; + struct exynos_drm_display_ops *display_ops = manager->display_ops; unsigned int count; DRM_DEBUG_KMS("%s\n", __FILE__); - if (!display) { - DRM_DEBUG_KMS("display is null.\n"); + if (!display_ops) { + DRM_DEBUG_KMS("display_ops is null.\n"); return 0; } @@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) * P.S. in case of lcd panel, count is always 1 if success * because lcd panel has only one mode. 
*/ - if (display->get_edid) { + if (display_ops->get_edid) { int ret; void *edid; @@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) return 0; } - ret = display->get_edid(manager->dev, connector, + ret = display_ops->get_edid(manager->dev, connector, edid, MAX_EDID); if (ret < 0) { DRM_ERROR("failed to get edid data.\n"); @@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode = drm_mode_create(connector->dev); struct fb_videomode *timing; - if (display->get_timing) - timing = display->get_timing(manager->dev); + if (display_ops->get_timing) + timing = display_ops->get_timing(manager->dev); else { drm_mode_destroy(connector->dev, mode); return 0; @@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) static int exynos_drm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct exynos_drm_manager *manager = - exynos_drm_get_manager(connector->encoder); - struct exynos_drm_display *display = manager->display; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct exynos_drm_manager *manager = exynos_connector->manager; + struct exynos_drm_display_ops *display_ops = manager->display_ops; struct fb_videomode timing; int ret = MODE_BAD; @@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, convert_to_video_timing(&timing, mode); - if (display && display->check_timing) - if (!display->check_timing(manager->dev, (void *)&timing)) + if (display_ops && display_ops->check_timing) + if (!display_ops->check_timing(manager->dev, (void *)&timing)) ret = MODE_OK; return ret; @@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct drm_mode_object *obj; + struct drm_encoder *encoder; + DRM_DEBUG_KMS("%s\n", __FILE__); - return connector->encoder; + obj = drm_mode_object_find(dev, exynos_connector->encoder_id, + DRM_MODE_OBJECT_ENCODER); + if (!obj) { + DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", + exynos_connector->encoder_id); + return NULL; + } + + encoder = obj_to_encoder(obj); + + return encoder; } static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { @@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { static enum drm_connector_status exynos_drm_connector_detect(struct drm_connector *connector, bool force) { - struct exynos_drm_manager *manager = - exynos_drm_get_manager(connector->encoder); - struct exynos_drm_display *display = manager->display; + struct exynos_drm_connector *exynos_connector = + to_exynos_connector(connector); + struct exynos_drm_manager *manager = exynos_connector->manager; + struct exynos_drm_display_ops *display_ops = + manager->display_ops; enum drm_connector_status status = connector_status_disconnected; DRM_DEBUG_KMS("%s\n", __FILE__); - if (display && display->is_connected) { - if (display->is_connected(manager->dev)) + if (display_ops && display_ops->is_connected) { + if (display_ops->is_connected(manager->dev)) status = connector_status_connected; else status = connector_status_disconnected; @@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, 
connector = &exynos_connector->drm_connector; - switch (manager->display->type) { + switch (manager->display_ops->type) { case EXYNOS_DISPLAY_TYPE_HDMI: type = DRM_MODE_CONNECTOR_HDMIA; + connector->interlace_allowed = true; + connector->polled = DRM_CONNECTOR_POLL_HPD; break; default: type = DRM_MODE_CONNECTOR_Unknown; @@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, if (err) goto err_connector; + exynos_connector->encoder_id = encoder->base.id; + exynos_connector->manager = manager; connector->encoder = encoder; + err = drm_mode_connector_attach_encoder(connector, encoder); if (err) { DRM_ERROR("failed to attach a connector to a encoder\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 9337e5e2dbb6..ee43cc220853 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -29,36 +29,17 @@ #include "drmP.h" #include "drm_crtc_helper.h" +#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_gem.h" #include "exynos_drm_buf.h" #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ drm_crtc) /* - * Exynos specific crtc postion structure. - * - * @fb_x: offset x on a framebuffer to be displyed - * - the unit is screen coordinates. - * @fb_y: offset y on a framebuffer to be displayed - * - the unit is screen coordinates. - * @crtc_x: offset x on hardware screen. - * @crtc_y: offset y on hardware screen. - * @crtc_w: width of hardware screen. - * @crtc_h: height of hardware screen. - */ -struct exynos_drm_crtc_pos { - unsigned int fb_x; - unsigned int fb_y; - unsigned int crtc_x; - unsigned int crtc_y; - unsigned int crtc_w; - unsigned int crtc_h; -}; - -/* * Exynos specific crtc structure. * * @drm_crtc: crtc object. 
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc) exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_crtc_mode_set); - exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit); + exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, + exynos_drm_encoder_crtc_commit); } -static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, - struct drm_framebuffer *fb, - struct drm_display_mode *mode, - struct exynos_drm_crtc_pos *pos) +int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, + struct drm_framebuffer *fb, + struct drm_display_mode *mode, + struct exynos_drm_crtc_pos *pos) { - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; unsigned int actual_w; unsigned int actual_h; - entry = exynos_drm_fb_get_buf(fb); - if (!entry) { - DRM_LOG_KMS("entry is null.\n"); + buffer = exynos_drm_fb_get_buf(fb); + if (!buffer) { + DRM_LOG_KMS("buffer is null.\n"); return -EFAULT; } - overlay->paddr = entry->paddr; - overlay->vaddr = entry->vaddr; + overlay->dma_addr = buffer->dma_addr; + overlay->vaddr = buffer->kvaddr; - DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", + DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", (unsigned long)overlay->vaddr, - (unsigned long)overlay->paddr); + (unsigned long)overlay->dma_addr); actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); @@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc) static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) { - DRM_DEBUG_KMS("%s\n", __FILE__); + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - /* TODO */ + DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode); + + switch (mode) { + case DRM_MODE_DPMS_ON: + exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, + exynos_drm_encoder_crtc_commit); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* TODO */ + exynos_drm_fn_encoder(crtc, NULL, + exynos_drm_encoder_crtc_disable); + break; + default: + DRM_DEBUG_KMS("unspecified mode %d\n", mode); + break; + } } static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) @@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) static void exynos_drm_crtc_commit(struct drm_crtc *crtc) { + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + DRM_DEBUG_KMS("%s\n", __FILE__); - /* drm framework doesn't check NULL. */ + exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, + exynos_drm_encoder_crtc_commit); } static bool diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index c584042d6d2c..25f72a62cb88 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); +/* + * Exynos specific crtc postion structure. + * + * @fb_x: offset x on a framebuffer to be displyed + * - the unit is screen coordinates. + * @fb_y: offset y on a framebuffer to be displayed + * - the unit is screen coordinates. + * @crtc_x: offset x on hardware screen. + * @crtc_y: offset y on hardware screen. + * @crtc_w: width of hardware screen. + * @crtc_h: height of hardware screen. 
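[Editor's note, not part of the patch] exynos_drm_overlay_update() above clips the overlay against the active mode with two min() calls. Below is a minimal standalone sketch of just that clipping step; the position and mode sizes are made up, and the struct only carries the fields the calculation needs.

#include <stdio.h>

/* Just the fields used by the clipping math; mirrors exynos_drm_crtc_pos. */
struct pos {
	unsigned int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int hdisplay = 1280, vdisplay = 720;      /* mode size */
	struct pos p = { .crtc_x = 1000, .crtc_y = 600,
			 .crtc_w = 400, .crtc_h = 200 };   /* requested overlay */

	/* Visible size is whatever fits between the overlay origin and the
	 * right/bottom screen edge, capped at the requested size. */
	unsigned int actual_w = min_u(hdisplay - p.crtc_x, p.crtc_w);
	unsigned int actual_h = min_u(vdisplay - p.crtc_y, p.crtc_h);

	printf("visible: %ux%u\n", actual_w, actual_h);    /* 280x120 */
	return 0;
}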
+ */ +struct exynos_drm_crtc_pos { + unsigned int fb_x; + unsigned int fb_y; + unsigned int crtc_x; + unsigned int crtc_y; + unsigned int crtc_w; + unsigned int crtc_h; +}; + +int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, + struct drm_framebuffer *fb, + struct drm_display_mode *mode, + struct exynos_drm_crtc_pos *pos); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 83810cbe3c17..53e2216de61d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -27,6 +27,7 @@ #include "drmP.h" #include "drm.h" +#include "drm_crtc_helper.h" #include <drm/exynos_drm.h> @@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) drm_mode_config_init(dev); + /* init kms poll for handling hpd */ + drm_kms_helper_poll_init(dev); + exynos_drm_mode_config_init(dev); /* @@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev) exynos_drm_fbdev_fini(dev); exynos_drm_device_unregister(dev); drm_vblank_cleanup(dev); + drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); kfree(dev->dev_private); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index c03683f2ae72..5e02e6ecc2e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -29,6 +29,7 @@ #ifndef _EXYNOS_DRM_DRV_H_ #define _EXYNOS_DRM_DRV_H_ +#include <linux/module.h> #include "drm.h" #define MAX_CRTC 2 @@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops { * @scan_flag: interlace or progressive way. * (it could be DRM_MODE_FLAG_*) * @bpp: pixel size.(in bit) - * @paddr: bus(accessed by dma) physical memory address to this overlay - * and this is physically continuous. + * @dma_addr: bus(accessed by dma) address to the memory region allocated + * for a overlay. * @vaddr: virtual memory addresss to this overlay. * @default_win: a window to be enabled. * @color_key: color key on or off. @@ -108,7 +109,7 @@ struct exynos_drm_overlay { unsigned int scan_flag; unsigned int bpp; unsigned int pitch; - dma_addr_t paddr; + dma_addr_t dma_addr; void __iomem *vaddr; bool default_win; @@ -130,7 +131,7 @@ struct exynos_drm_overlay { * @check_timing: check if timing is valid or not. * @power_on: display device on or off. */ -struct exynos_drm_display { +struct exynos_drm_display_ops { enum exynos_drm_output_type type; bool (*is_connected)(struct device *dev); int (*get_edid)(struct device *dev, struct drm_connector *connector, @@ -146,12 +147,14 @@ struct exynos_drm_display { * @mode_set: convert drm_display_mode to hw specific display mode and * would be called by encoder->mode_set(). * @commit: set current hw specific display mode to hw. + * @disable: disable hardware specific display mode. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. 
*/ struct exynos_drm_manager_ops { void (*mode_set)(struct device *subdrv_dev, void *mode); void (*commit)(struct device *subdrv_dev); + void (*disable)(struct device *subdrv_dev); int (*enable_vblank)(struct device *subdrv_dev); void (*disable_vblank)(struct device *subdrv_dev); }; @@ -178,7 +181,7 @@ struct exynos_drm_manager { int pipe; struct exynos_drm_manager_ops *ops; struct exynos_drm_overlay_ops *overlay_ops; - struct exynos_drm_display *display; + struct exynos_drm_display_ops *display_ops; }; /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 7cf6fa86a67e..153061415baf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); + struct exynos_drm_manager_ops *manager_ops = manager->ops; DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode); + switch (mode) { + case DRM_MODE_DPMS_ON: + if (manager_ops && manager_ops->commit) + manager_ops->commit(manager->dev); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* TODO */ + if (manager_ops && manager_ops->disable) + manager_ops->disable(manager->dev); + break; + default: + DRM_ERROR("unspecified mode %d\n", mode); + break; + } + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { - struct exynos_drm_display *display = manager->display; + struct exynos_drm_display_ops *display_ops = + manager->display_ops; - if (display && display->power_on) - display->power_on(manager->dev, mode); + DRM_DEBUG_KMS("connector[%d] dpms[%d]\n", + connector->base.id, mode); + if (display_ops && display_ops->power_on) + display_ops->power_on(manager->dev, mode); } } } @@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder) { struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; - struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; DRM_DEBUG_KMS("%s\n", __FILE__); if (manager_ops && manager_ops->commit) manager_ops->commit(manager->dev); - - if (overlay_ops && overlay_ops->commit) - overlay_ops->commit(manager->dev); } static struct drm_crtc * @@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; + struct exynos_drm_private *private = dev->dev_private; + struct exynos_drm_manager *manager; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc != crtc) - continue; + /* + * if crtc is detached from encoder, check pipe, + * otherwise check crtc attached to encoder + */ + if (!encoder->crtc) { + manager = to_exynos_encoder(encoder)->manager; + if (manager->pipe < 0 || + private->crtc[manager->pipe] != crtc) + continue; + } else { + if (encoder->crtc != crtc) + continue; + } fn(encoder, data); } @@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; + int crtc = *(int *)data; + + DRM_DEBUG_KMS("%s\n", __FILE__); + + /* + * when crtc is detached from encoder, this pipe is used + * to 
select manager operation + */ + manager->pipe = crtc; - overlay_ops->commit(manager->dev); + if (overlay_ops && overlay_ops->commit) + overlay_ops->commit(manager->dev); } void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) @@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; struct exynos_drm_overlay *overlay = data; - overlay_ops->mode_set(manager->dev, overlay); + if (overlay_ops && overlay_ops->mode_set) + overlay_ops->mode_set(manager->dev, overlay); +} + +void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) +{ + struct exynos_drm_manager *manager = + to_exynos_encoder(encoder)->manager; + struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; + + DRM_DEBUG_KMS("\n"); + + if (overlay_ops && overlay_ops->disable) + overlay_ops->disable(manager->dev); + + /* + * crtc is already detached from encoder and last + * function for detaching is properly done, so + * clear pipe from manager to prevent repeated call + */ + if (!encoder->crtc) + manager->pipe = -1; } MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h index 5ecd645d06a9..a22acfbf0e4e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h @@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data); void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 48d29cfd5240..5bf4a1ac7f82 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -29,7 +29,9 @@ #include "drmP.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" +#include "drm_fb_helper.h" +#include "exynos_drm_drv.h" #include "exynos_drm_fb.h" #include "exynos_drm_buf.h" #include "exynos_drm_gem.h" @@ -41,14 +43,14 @@ * * @fb: drm framebuffer obejct. * @exynos_gem_obj: exynos specific gem object containing a gem object. - * @entry: pointer to exynos drm buffer entry object. - * - containing only the information to physically continuous memory - * region allocated at default framebuffer creation. + * @buffer: pointer to exynos_drm_gem_buffer object. + * - contain the memory information to memory region allocated + * at default framebuffer creation. */ struct exynos_drm_fb { struct drm_framebuffer fb; struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; }; static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) @@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) * default framebuffer has no gem object so * a buffer of the default framebuffer should be released at here. 
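[Editor's note, not part of the patch] The exynos_drm_fn_encoder() change above matches an encoder to a crtc either through encoder->crtc or, once the crtc has been detached, through the pipe index cached in the encoder's manager. A compressed, self-contained sketch of that matching rule follows; the structs are stand-ins, not the driver's real types.

#include <stdbool.h>
#include <stdio.h>

struct crtc { int id; };

/* Stand-ins: an encoder either still points at a crtc or remembers which
 * pipe it was driving via its manager. */
struct manager { int pipe; };
struct encoder {
	struct crtc *crtc;       /* NULL once the crtc is detached */
	struct manager *manager;
};

static bool encoder_matches_crtc(const struct encoder *enc,
				 const struct crtc *crtc,
				 struct crtc *const crtcs[], int num_crtcs)
{
	if (!enc->crtc) {
		int pipe = enc->manager->pipe;

		/* detached: fall back to the cached pipe number */
		return pipe >= 0 && pipe < num_crtcs && crtcs[pipe] == crtc;
	}
	return enc->crtc == crtc;
}

int main(void)
{
	struct crtc c0 = { 0 }, c1 = { 1 };
	struct crtc *crtcs[] = { &c0, &c1 };
	struct manager m = { .pipe = 1 };
	struct encoder detached = { .crtc = NULL, .manager = &m };

	printf("matches crtc1: %d\n",
	       encoder_matches_crtc(&detached, &c1, crtcs, 2)); /* 1 */
	return 0;
}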
*/ - if (!exynos_fb->exynos_gem_obj && exynos_fb->entry) - exynos_drm_buf_destroy(fb->dev, exynos_fb->entry); + if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer) + exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer); kfree(exynos_fb); exynos_fb = NULL; @@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, */ if (!mode_cmd->handle) { if (!file_priv) { - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; /* * in case that file_priv is NULL, it allocates * only buffer and this buffer would be used * for default framebuffer. */ - entry = exynos_drm_buf_create(dev, size); - if (IS_ERR(entry)) { - ret = PTR_ERR(entry); + buffer = exynos_drm_buf_create(dev, size); + if (IS_ERR(buffer)) { + ret = PTR_ERR(buffer); goto err_buffer; } - exynos_fb->entry = entry; + exynos_fb->buffer = buffer; - DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n", - (unsigned long)entry->paddr, size); + DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n", + (unsigned long)buffer->dma_addr, size); goto out; } else { - exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, - size, - &mode_cmd->handle); + exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, + &mode_cmd->handle, + size); if (IS_ERR(exynos_gem_obj)) { ret = PTR_ERR(exynos_gem_obj); goto err_buffer; @@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev, * so that default framebuffer has no its own gem object, * only its own buffer object. */ - exynos_fb->entry = exynos_gem_obj->entry; + exynos_fb->buffer = exynos_gem_obj->buffer; - DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n", - (unsigned long)exynos_fb->entry->paddr, size, + DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n", + (unsigned long)exynos_fb->buffer->dma_addr, size, (unsigned int)&exynos_gem_obj->base); out: @@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev, return exynos_drm_fb_init(file_priv, dev, mode_cmd); } -struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) +struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; DRM_DEBUG_KMS("%s\n", __FILE__); - entry = exynos_fb->entry; - if (!entry) + buffer = exynos_fb->buffer; + if (!buffer) return NULL; - DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", - (unsigned long)entry->vaddr, - (unsigned long)entry->paddr); + DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", + (unsigned long)buffer->kvaddr, + (unsigned long)buffer->dma_addr); - return entry; + return buffer; +} + +static void exynos_drm_output_poll_changed(struct drm_device *dev) +{ + struct exynos_drm_private *private = dev->dev_private; + struct drm_fb_helper *fb_helper = private->fb_helper; + + if (fb_helper) + drm_fb_helper_hotplug_event(fb_helper); } static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { .fb_create = exynos_drm_fb_create, + .output_poll_changed = exynos_drm_output_poll_changed, }; void exynos_drm_mode_config_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 1f4b3d1a7713..836f41008187 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -33,6 +33,7 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" +#include "exynos_drm_gem.h" #include "exynos_drm_buf.h" #define 
MAX_CONNECTOR 4 @@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = { }; static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, - struct drm_framebuffer *fb, - unsigned int fb_width, - unsigned int fb_height) + struct drm_framebuffer *fb) { struct fb_info *fbi = helper->fbdev; struct drm_device *dev = helper->dev; struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); - struct exynos_drm_buf_entry *entry; - unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); + struct exynos_drm_gem_buf *buffer; + unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); unsigned long offset; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, exynos_fb->fb = fb; drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); - drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); + drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height); - entry = exynos_drm_fb_get_buf(fb); - if (!entry) { - DRM_LOG_KMS("entry is null.\n"); + buffer = exynos_drm_fb_get_buf(fb); + if (!buffer) { + DRM_LOG_KMS("buffer is null.\n"); return -EFAULT; } offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); offset += fbi->var.yoffset * fb->pitch; - dev->mode_config.fb_base = entry->paddr; - fbi->screen_base = entry->vaddr + offset; - fbi->fix.smem_start = entry->paddr + offset; + dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; + fbi->screen_base = buffer->kvaddr + offset; + fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset); fbi->screen_size = size; fbi->fix.smem_len = size; @@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper, goto out; } - ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, - sizes->fb_height); + ret = exynos_drm_fbdev_update(helper, helper->fb); if (ret < 0) fb_dealloc_cmap(&fbi->cmap); @@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper, } helper->fb = exynos_fbdev->fb; - return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, - sizes->fb_height); + return exynos_drm_fbdev_update(helper, helper->fb); } static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, @@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev) fb_helper = private->fb_helper; if (fb_helper) { + struct list_head temp_list; + + INIT_LIST_HEAD(&temp_list); + + /* + * fb_helper is reintialized but kernel fb is reused + * so kernel_fb_list need to be backuped and restored + */ + if (!list_empty(&fb_helper->kernel_fb_list)) + list_replace_init(&fb_helper->kernel_fb_list, + &temp_list); + drm_fb_helper_fini(fb_helper); ret = drm_fb_helper_init(dev, fb_helper, @@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev) return ret; } + if (!list_empty(&temp_list)) + list_replace(&temp_list, &fb_helper->kernel_fb_list); + ret = drm_fb_helper_single_add_all_connectors(fb_helper); if (ret < 0) { DRM_ERROR("failed to add fb helper to connectors\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 4659c88cdd9b..db3b3d9e731d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -64,7 +64,7 @@ struct fimd_win_data { unsigned int fb_width; unsigned int fb_height; unsigned int bpp; - dma_addr_t paddr; + dma_addr_t dma_addr; void __iomem *vaddr; unsigned int buf_offsize; unsigned int line_size; /* bytes */ @@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int 
mode) return 0; } -static struct exynos_drm_display fimd_display = { +static struct exynos_drm_display_ops fimd_display_ops = { .type = EXYNOS_DISPLAY_TYPE_LCD, .is_connected = fimd_display_is_connected, .get_timing = fimd_get_timing, @@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev) writel(val, ctx->regs + VIDCON0); } +static void fimd_disable(struct device *dev) +{ + struct fimd_context *ctx = get_fimd_context(dev); + struct exynos_drm_subdrv *subdrv = &ctx->subdrv; + struct drm_device *drm_dev = subdrv->drm_dev; + struct exynos_drm_manager *manager = &subdrv->manager; + u32 val; + + DRM_DEBUG_KMS("%s\n", __FILE__); + + /* fimd dma off */ + val = readl(ctx->regs + VIDCON0); + val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F); + writel(val, ctx->regs + VIDCON0); + + /* + * if vblank is enabled status with dma off then + * it disables vsync interrupt. + */ + if (drm_dev->vblank_enabled[manager->pipe] && + atomic_read(&drm_dev->vblank_refcount[manager->pipe])) { + drm_vblank_put(drm_dev, manager->pipe); + + /* + * if vblank_disable_allowed is 0 then disable + * vsync interrupt right now else the vsync interrupt + * would be disabled by drm timer once a current process + * gives up ownershop of vblank event. + */ + if (!drm_dev->vblank_disable_allowed) + drm_vblank_off(drm_dev, manager->pipe); + } +} + static int fimd_enable_vblank(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); @@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev) static struct exynos_drm_manager_ops fimd_manager_ops = { .commit = fimd_commit, + .disable = fimd_disable, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, }; @@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev, win_data->ovl_height = overlay->crtc_height; win_data->fb_width = overlay->fb_width; win_data->fb_height = overlay->fb_height; - win_data->paddr = overlay->paddr + offset; + win_data->dma_addr = overlay->dma_addr + offset; win_data->vaddr = overlay->vaddr + offset; win_data->bpp = overlay->bpp; win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * @@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev, DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", win_data->ovl_width, win_data->ovl_height); DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", - (unsigned long)win_data->paddr, + (unsigned long)win_data->dma_addr, (unsigned long)win_data->vaddr); DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", overlay->fb_width, overlay->crtc_width); @@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev) writel(val, ctx->regs + SHADOWCON); /* buffer start address */ - val = win_data->paddr; + val = (unsigned long)win_data->dma_addr; writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); /* buffer end address */ size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); - val = win_data->paddr + size; + val = (unsigned long)(win_data->dma_addr + size); writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", - (unsigned long)win_data->paddr, val, size); + (unsigned long)win_data->dma_addr, val, size); DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", win_data->ovl_width, win_data->ovl_height); @@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev) static void fimd_win_disable(struct device *dev) { struct fimd_context *ctx = get_fimd_context(dev); - struct fimd_win_data *win_data; int win = ctx->default_win; u32 val; @@ -456,8 +490,6 @@ static void 
fimd_win_disable(struct device *dev) if (win < 0 || win > WINDOWS_NR) return; - win_data = &ctx->win_data[win]; - /* protect windows */ val = readl(ctx->regs + SHADOWCON); val |= SHADOWCON_WINx_PROTECT(win); @@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) /* VSYNC interrupt */ writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); + /* + * in case that vblank_disable_allowed is 1, it could induce + * the problem that manager->pipe could be -1 because with + * disable callback, vsync interrupt isn't disabled and at this moment, + * vsync interrupt could occur. the vsync interrupt would be disabled + * by timer handler later. + */ + if (manager->pipe == -1) + return IRQ_HANDLED; + drm_handle_vblank(drm_dev, manager->pipe); fimd_finish_pageflip(drm_dev, manager->pipe); @@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev) */ drm_dev->irq_enabled = 1; - /* - * with vblank_disable_allowed = 1, vblank interrupt will be disabled - * by drm timer once a current process gives up ownership of - * vblank event.(drm_vblank_put function was called) - */ - drm_dev->vblank_disable_allowed = 1; - return 0; } @@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev) subdrv->manager.pipe = -1; subdrv->manager.ops = &fimd_manager_ops; subdrv->manager.overlay_ops = &fimd_overlay_ops; - subdrv->manager.display = &fimd_display; + subdrv->manager.display_ops = &fimd_display_ops; subdrv->manager.dev = dev; platform_set_drvdata(pdev, ctx); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index a8e7a88906ed..aba0fe47f7ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj) return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; } -struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, - struct drm_device *dev, unsigned int size, - unsigned int *handle) +static struct exynos_drm_gem_obj + *exynos_drm_gem_init(struct drm_device *drm_dev, + struct drm_file *file_priv, unsigned int *handle, + unsigned int size) { struct exynos_drm_gem_obj *exynos_gem_obj; - struct exynos_drm_buf_entry *entry; struct drm_gem_object *obj; int ret; - DRM_DEBUG_KMS("%s\n", __FILE__); - - size = roundup(size, PAGE_SIZE); - exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); if (!exynos_gem_obj) { DRM_ERROR("failed to allocate exynos gem object.\n"); return ERR_PTR(-ENOMEM); } - /* allocate the new buffer object and memory region. 
*/ - entry = exynos_drm_buf_create(dev, size); - if (!entry) { - kfree(exynos_gem_obj); - return ERR_PTR(-ENOMEM); - } - - exynos_gem_obj->entry = entry; - obj = &exynos_gem_obj->base; - ret = drm_gem_object_init(dev, obj, size); + ret = drm_gem_object_init(drm_dev, obj, size); if (ret < 0) { - DRM_ERROR("failed to initailize gem object.\n"); - goto err_obj_init; + DRM_ERROR("failed to initialize gem object.\n"); + ret = -EINVAL; + goto err_object_init; } DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); @@ -127,24 +115,50 @@ err_handle_create: err_create_mmap_offset: drm_gem_object_release(obj); -err_obj_init: - exynos_drm_buf_destroy(dev, exynos_gem_obj->entry); - +err_object_init: kfree(exynos_gem_obj); return ERR_PTR(ret); } +struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, + struct drm_file *file_priv, + unsigned int *handle, unsigned long size) +{ + + struct exynos_drm_gem_obj *exynos_gem_obj = NULL; + struct exynos_drm_gem_buf *buffer; + + size = roundup(size, PAGE_SIZE); + + DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); + + buffer = exynos_drm_buf_create(dev, size); + if (IS_ERR(buffer)) { + return ERR_CAST(buffer); + } + + exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size); + if (IS_ERR(exynos_gem_obj)) { + exynos_drm_buf_destroy(dev, buffer); + return exynos_gem_obj; + } + + exynos_gem_obj->buffer = buffer; + + return exynos_gem_obj; +} + int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file_priv) { struct drm_exynos_gem_create *args = data; - struct exynos_drm_gem_obj *exynos_gem_obj; + struct exynos_drm_gem_obj *exynos_gem_obj = NULL; - DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); + DRM_DEBUG_KMS("%s\n", __FILE__); - exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, - &args->handle); + exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, + &args->handle, args->size); if (IS_ERR(exynos_gem_obj)) return PTR_ERR(exynos_gem_obj); @@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, { struct drm_gem_object *obj = filp->private_data; struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; unsigned long pfn, vm_size; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, vm_size = vma->vm_end - vma->vm_start; /* - * a entry contains information to physically continuous memory + * a buffer contains information to physically continuous memory * allocated by user request or at framebuffer creation. */ - entry = exynos_gem_obj->entry; + buffer = exynos_gem_obj->buffer; /* check if user-requested size is valid. */ - if (vm_size > entry->size) + if (vm_size > buffer->size) return -EINVAL; /* * get page frame number to physical memory to be mapped * to user space. 
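[Editor's note, not part of the patch] The mmap path above checks the requested size against the allocated buffer and converts the DMA address into a page frame number before the pages are mapped. A hedged userspace sketch of just that arithmetic; PAGE_SHIFT and the addresses are invented, and the actual mapping call the driver performs afterwards is omitted.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* 4 KiB pages, typical setting */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct gem_buf {
	uint64_t dma_addr;    /* bus address of the contiguous allocation */
	unsigned long size;   /* allocation size, page aligned */
};

int main(void)
{
	struct gem_buf buf = { .dma_addr = 0x40100000, .size = 8 * PAGE_SIZE };
	unsigned long vm_size = 4 * PAGE_SIZE;  /* what userspace asked to map */

	/* Reject mappings larger than the backing buffer, as the driver does. */
	if (vm_size > buf.size) {
		fprintf(stderr, "requested mapping too large\n");
		return 1;
	}

	/* Page frame number of the first page of the buffer. */
	unsigned long pfn = (unsigned long)(buf.dma_addr >> PAGE_SHIFT);

	printf("pfn = 0x%lx for %lu pages\n", pfn, vm_size >> PAGE_SHIFT);
	return 0;
}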
*/ - pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; + pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT; DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); @@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj) exynos_gem_obj = to_exynos_gem_obj(gem_obj); - exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); + exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer); kfree(exynos_gem_obj); } @@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * args->bpp >> 3; args->size = args->pitch * args->height; - exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, - &args->handle); + exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle, + args->size); if (IS_ERR(exynos_gem_obj)) return PTR_ERR(exynos_gem_obj); @@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); - pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; + pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >> + PAGE_SHIFT) + page_offset; ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index e5fc0148277b..ef8797334e6d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -30,13 +30,29 @@ struct exynos_drm_gem_obj, base) /* + * exynos drm gem buffer structure. + * + * @kvaddr: kernel virtual address to allocated memory region. + * @dma_addr: bus address(accessed by dma) to allocated memory region. + * - this address could be physical address without IOMMU and + * device address with IOMMU. + * @size: size of allocated memory region. + */ +struct exynos_drm_gem_buf { + void __iomem *kvaddr; + dma_addr_t dma_addr; + unsigned long size; +}; + +/* * exynos drm buffer structure. * * @base: a gem object. * - a new handle to this gem object would be created * by drm_gem_handle_create(). - * @entry: pointer to exynos drm buffer entry object. - * - containing the information to physically + * @buffer: a pointer to exynos_drm_gem_buffer object. + * - contain the information to memory region allocated + * by user request or at framebuffer creation. * continuous memory region allocated by user request * or at framebuffer creation. * @@ -45,13 +61,13 @@ */ struct exynos_drm_gem_obj { struct drm_gem_object base; - struct exynos_drm_buf_entry *entry; + struct exynos_drm_gem_buf *buffer; }; /* create a new buffer and get a new gem handle. 
*/ -struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, - struct drm_device *dev, unsigned int size, - unsigned int *handle); +struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, + struct drm_file *file_priv, + unsigned int *handle, unsigned long size); /* * request gem object creation and buffer allocation as the size diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d14b44e13f51..d09a6e02dc95 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; + int ret; ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; if (ring->size == 0) return 0; + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + seq_printf(m, "Ring %s:\n", ring->name); seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); @@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u16 crstanddelay = I915_READ16(CRSTANDVID); + u16 crstanddelay; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + crstanddelay = I915_READ16(CRSTANDVID); + + mutex_unlock(&dev->struct_mutex); seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); @@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 delayfreq; - int i; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; for (i = 0; i < 16; i++) { delayfreq = I915_READ(PXVFREQ_BASE + i * 4); @@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused) (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 inttoext; - int i; + int ret, i; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; for (i = 1; i <= 32; i++) { inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); } + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); - u32 rstdbyctl = I915_READ(RSTDBYCTL); - u16 crstandvid = I915_READ16(CRSTANDVID); + u32 rgvmodectl, rstdbyctl; + u16 crstandvid; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + rgvmodectl = I915_READ(MEMMODECTL); + 
rstdbyctl = I915_READ(RSTDBYCTL); + crstandvid = I915_READ16(CRSTANDVID); + + mutex_unlock(&dev->struct_mutex); seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? "yes" : "no"); @@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); + mutex_unlock(&dev->struct_mutex); + return 0; } @@ -1506,7 +1551,10 @@ drm_add_fake_info_node(struct drm_minor *minor, node->minor = minor; node->dent = ent; node->info_ent = (void *) key; - list_add(&node->list, &minor->debugfs_nodes.list); + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index cc531bb59c26..15bfa9145d2b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); MODULE_PARM_DESC(i915_enable_rc6, "Enable power-saving render C-state 6 (default: true)"); -unsigned int i915_enable_fbc __read_mostly = -1; +int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); MODULE_PARM_DESC(i915_enable_fbc, "Enable frame buffer compression for power savings " @@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock, "Use panel (LVDS/eDP) downclocking for power savings " "(default: false)"); -unsigned int i915_panel_use_ssc __read_mostly = -1; +int i915_panel_use_ssc __read_mostly = -1; module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); MODULE_PARM_DESC(lvds_use_ssc, "Use Spread Spectrum Clock with panels [LVDS/eDP] " @@ -107,7 +107,7 @@ static struct drm_driver driver; extern int intel_agp_enabled; #define INTEL_VGA_DEVICE(id, info) { \ - .class = PCI_CLASS_DISPLAY_VGA << 8, \ + .class = PCI_BASE_CLASS_DISPLAY << 16, \ .class_mask = 0xff0000, \ .vendor = 0x8086, \ .device = id, \ @@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = { }; static struct drm_driver driver = { - /* don't use mtrr's here, the Xserver or user space app should - * deal with them for intel hardware. + /* Don't use MTRRs here; the Xserver or userspace app should + * deal with them for Intel hardware. 
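[Editor's note, not part of the patch] The i915_drv.c hunks above change i915_enable_fbc and i915_panel_use_ssc from unsigned int to int. The point is the -1 "auto" default: stored in an unsigned variable it wraps to a huge positive value, so any `param < 0` check in the driver can never fire. A tiny standalone demonstration of that pitfall; the variable names only echo the parameters and nothing here touches the module-param machinery.

#include <stdio.h>

int main(void)
{
	unsigned int fbc_unsigned = -1;  /* wraps to UINT_MAX */
	int fbc_signed = -1;             /* keeps the "auto-detect" sentinel */

	/* With the unsigned flavour the comparison is always false (compilers
	 * warn about it), so the auto-detect branch is dead code; with the
	 * signed one it behaves as intended. */
	printf("unsigned: %u, < 0? %s\n",
	       fbc_unsigned, fbc_unsigned < 0 ? "yes" : "no");
	printf("signed:   %d, < 0? %s\n",
	       fbc_signed, fbc_signed < 0 ? "yes" : "no");
	return 0;
}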
*/ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 06a37f4fd74b..4a9c1b979804 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -126,6 +126,9 @@ struct drm_i915_master_private { struct _drm_i915_sarea *sarea_priv; }; #define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 16 +/* 16 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 5 struct drm_i915_fence_reg { struct list_head lru_list; @@ -168,7 +171,7 @@ struct drm_i915_error_state { u32 instdone1; u32 seqno; u64 bbaddr; - u64 fence[16]; + u64 fence[I915_MAX_NUM_FENCES]; struct timeval time; struct drm_i915_error_object { int page_count; @@ -182,7 +185,7 @@ struct drm_i915_error_state { u32 gtt_offset; u32 read_domains; u32 write_domain; - s32 fence_reg:5; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; s32 pinned:2; u32 tiling:2; u32 dirty:1; @@ -375,7 +378,7 @@ typedef struct drm_i915_private { struct notifier_block lid_notifier; int crt_ddc_pin; - struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ + struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ int num_fence_regs; /* 8 on pre-965, 16 otherwise */ @@ -506,7 +509,7 @@ typedef struct drm_i915_private { u8 saveAR[21]; u8 saveDACMASK; u8 saveCR[37]; - uint64_t saveFENCE[16]; + uint64_t saveFENCE[I915_MAX_NUM_FENCES]; u32 saveCURACNTR; u32 saveCURAPOS; u32 saveCURABASE; @@ -777,10 +780,8 @@ struct drm_i915_gem_object { * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT. * Protected by dev->struct_mutex. - * - * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) */ - signed int fence_reg:5; + signed int fence_reg:I915_MAX_NUM_FENCE_BITS; /** * Advice: are the backing pages purgeable? @@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly; extern unsigned int i915_powersave __read_mostly; extern unsigned int i915_semaphores __read_mostly; extern unsigned int i915_lvds_downclock __read_mostly; -extern unsigned int i915_panel_use_ssc __read_mostly; +extern int i915_panel_use_ssc __read_mostly; extern int i915_vbt_sdvo_panel_type __read_mostly; extern unsigned int i915_enable_rc6 __read_mostly; -extern unsigned int i915_enable_fbc __read_mostly; +extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6651c36b6e8a..60ff1b63b568 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file, if (obj->base.size > dev_priv->mm.gtt_mappable_end) { ret = -E2BIG; - goto unlock; + goto out; } if (obj->madv != I915_MADV_WILLNEED) { @@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - for (i = 0; i < 16; i++) { + for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; struct drm_i915_gem_object *obj = reg->obj; @@ -2026,8 +2026,13 @@ i915_wait_request(struct intel_ring_buffer *ring, * to handle this, the waiter on a request often wants an associated * buffer to have made it to the inactive list, and we would need * a separate wait queue to handle that. 
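[Editor's note, not part of the patch] The i915_drv.h hunk above sizes the fence_reg bitfield with I915_MAX_NUM_FENCE_BITS, i.e. 5 bits: 4 bits to index 16 fence registers plus a sign bit so FENCE_REG_NONE (-1) still fits. A short standalone check of that encoding; the struct is a stand-in, not the real drm_i915_gem_object.

#include <stdio.h>

#define FENCE_REG_NONE      -1
#define MAX_NUM_FENCES      16
#define MAX_NUM_FENCE_BITS  5   /* 16 values + sign bit for FENCE_REG_NONE */

struct obj {
	signed int fence_reg : MAX_NUM_FENCE_BITS;  /* range -16..15 */
};

int main(void)
{
	struct obj o;

	o.fence_reg = FENCE_REG_NONE;
	printf("none  -> %d\n", o.fence_reg);        /* -1 */

	o.fence_reg = MAX_NUM_FENCES - 1;
	printf("reg15 -> %d\n", o.fence_reg);        /* 15, still representable */
	return 0;
}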
+ * + * To avoid a recursion with the ilk VT-d workaround (that calls + * gpu_idle when unbinding objects with interruptible==false) don't + * retire requests in that case (because it might call unbind if the + * active list holds the last reference to the object). */ - if (ret == 0) + if (ret == 0 && dev_priv->mm.interruptible) i915_gem_retire_requests_ring(ring); return ret; @@ -3512,9 +3517,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * so emit a request to do so. */ request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request) + if (request) { ret = i915_add_request(obj->ring, NULL, request); - else + if (ret) + kfree(request); + } else ret = -ENOMEM; } @@ -3613,7 +3620,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; - if (IS_GEN6(dev)) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { /* On Gen6, we can have the GPU use the LLC (the CPU * cache) for about a 10% performance improvement * compared to uncached. Graphics requests other than @@ -3877,7 +3884,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.gtt_list); for (i = 0; i < I915_NUM_RINGS; i++) init_ring_lists(&dev_priv->ring[i]); - for (i = 0; i < 16; i++) + for (i = 0; i < I915_MAX_NUM_FENCES; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9ee2729fe5c6..b40004b55977 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev, /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 7: case 6: for (i = 0; i < 16; i++) error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a09416e611f..b080cc824001 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1553,12 +1553,21 @@ */ #define PP_READY (1 << 30) #define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_ON (1 << 28) -#define PP_SEQUENCE_OFF (2 << 28) -#define PP_SEQUENCE_MASK 0x30000000 +#define PP_SEQUENCE_POWER_UP (1 << 28) +#define PP_SEQUENCE_POWER_DOWN (2 << 28) +#define PP_SEQUENCE_MASK (3 << 28) +#define PP_SEQUENCE_SHIFT 28 #define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) #define PP_SEQUENCE_STATE_MASK 0x0000000f +#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) +#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) +#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) +#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) +#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) +#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) +#define PP_SEQUENCE_STATE_RESET (0xf << 0) #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 @@ -3444,6 +3453,10 @@ #define GT_FIFO_FREE_ENTRIES 0x120008 #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GEN6_UCGCTL2 0x9404 +# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) +# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) + #define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f8f602d76650..7886e4fb60e3 100644 --- 
a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 7: case 6: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); @@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) /* Fences */ switch (INTEL_INFO(dev)->gen) { + case 7: case 6: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 981b1f1c04d8..e77a863a3833 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && - intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); @@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, lvds_bpc = 6; if (lvds_bpc < display_bpc) { - DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); + DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); display_bpc = lvds_bpc; } continue; @@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, unsigned int edp_bpc = dev_priv->edp.bpp / 3; if (edp_bpc < display_bpc) { - DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); display_bpc = edp_bpc; } continue; @@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, /* Don't use an invalid EDID bpc value */ if (connector->display_info.bpc && connector->display_info.bpc < display_bpc) { - DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); + DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); display_bpc = connector->display_info.bpc; } } @@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, */ if (intel_encoder->type == INTEL_OUTPUT_HDMI) { if (display_bpc > 8 && display_bpc < 12) { - DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); + DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); display_bpc = 12; } else { - DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); + DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); display_bpc = 8; } } @@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, display_bpc = min(display_bpc, bpc); - DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", - bpc, display_bpc); + DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", + bpc, display_bpc); *pipe_bpp = display_bpc * 3; @@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; if ((is_lvds && dev_priv->lvds_dither) || dither) { pipeconf |= PIPECONF_DITHER_EN; - pipeconf |= PIPECONF_DITHER_TYPE_ST1; + pipeconf |= PIPECONF_DITHER_TYPE_SP; } if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { intel_dp_set_m_n(crtc, mode, 
adjusted_mode); @@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock + * gating disable must be set. Failure to set it results in + * flickering pixels due to Z write ordering failures after + * some amount of runtime in the Mesa "fire" demo, and Unigine + * Sanctuary and Tropics, and apparently anything else with + * alpha test or pixel discard. + * + * According to the spec, bit 11 (RCCUNIT) must also be set, + * but we didn't debug actual testcases to find it out. + */ + I915_WRITE(GEN6_UCGCTL2, + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); + /* * According to the spec the following bits should be * set in order to enable memory self-refresh and fbc: diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09b318b0227f..4d0358fad937 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -59,7 +59,6 @@ struct intel_dp { struct i2c_algo_dp_aux_data algo; bool is_pch_edp; uint8_t train_set[4]; - uint8_t link_status[DP_LINK_STATUS_SIZE]; int panel_power_up_delay; int panel_power_down_delay; int panel_power_cycle_delay; @@ -68,7 +67,6 @@ struct intel_dp { struct drm_display_mode *panel_fixed_mode; /* for eDP */ struct delayed_work panel_vdd_work; bool want_panel_vdd; - unsigned long panel_off_jiffies; }; /** @@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder, static int intel_dp_max_lane_count(struct intel_dp *intel_dp) { - int max_lane_count = 4; - - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { - max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; - switch (max_lane_count) { - case 1: case 2: case 4: - break; - default: - max_lane_count = 4; - } + int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; } return max_lane_count; } @@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, continue; intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) + { lane_count = intel_dp->lane_count; break; - } else if (is_edp(intel_dp)) { - lane_count = dev_priv->edp.lanes; - break; } } @@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, ironlake_edp_pll_off(encoder); } - intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; - intel_dp->DP |= intel_dp->color_range; + /* + * There are three kinds of DP registers: + * + * IBX PCH + * CPU + * CPT PCH + * + * IBX PCH and CPU are the same for almost everything, + * except that the CPU DP PLL is configured in this + * register + * + * CPT PCH is quite different, having many bits moved + * to the TRANS_DP_CTL register instead. 
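[Editor's note, not part of the patch] intel_dp_max_lane_count() above now reads DP_MAX_LANE_COUNT from the cached DPCD unconditionally, masks the low five bits, and falls back to 4 for anything other than 1, 2 or 4. A small standalone sketch of that defensive parse; the raw byte values fed in are invented.

#include <stdint.h>
#include <stdio.h>

/* Only lane counts of 1, 2 or 4 are valid in DisplayPort; treat anything
 * else reported by the sink as "assume the maximum". */
static int max_lane_count(uint8_t dpcd_max_lane_count)
{
	int lanes = dpcd_max_lane_count & 0x1f;

	switch (lanes) {
	case 1:
	case 2:
	case 4:
		return lanes;
	default:
		return 4;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       max_lane_count(0x84),   /* 4 lanes, enhanced-framing bit set */
	       max_lane_count(0x02),   /* 2 lanes */
	       max_lane_count(0x03));  /* bogus -> fall back to 4 */
	return 0;
}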
That + * configuration happens (oddly) in ironlake_pch_enable + */ - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - intel_dp->DP |= DP_SYNC_HS_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - intel_dp->DP |= DP_SYNC_VS_HIGH; + /* Preserve the BIOS-computed detected bit. This is + * supposed to be read-only. + */ + intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; - if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) - intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; - else - intel_dp->DP |= DP_LINK_TRAIN_OFF; + /* Handle DP bits in common between all three register formats */ + + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; switch (intel_dp->lane_count) { case 1: @@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_write_eld(encoder, adjusted_mode); } - memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); intel_dp->link_configuration[0] = intel_dp->link_bw; intel_dp->link_configuration[1] = intel_dp->lane_count; intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; - /* * Check for DPCD version > 1.1 and enhanced framing support */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - intel_dp->DP |= DP_ENHANCED_FRAMING; } - /* CPT DP's pipe select is decided in TRANS_DP_CTL */ - if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) - intel_dp->DP |= DP_PIPEB_SELECT; + /* Split out the IBX/CPU vs CPT settings */ - if (is_cpu_edp(intel_dp)) { - /* don't miss out required setting for eDP */ - intel_dp->DP |= DP_PLL_ENABLE; - if (adjusted_mode->clock < 200000) - intel_dp->DP |= DP_PLL_FREQ_160MHZ; - else - intel_dp->DP |= DP_PLL_FREQ_270MHZ; + if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { + intel_dp->DP |= intel_dp->color_range; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF; + + if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + if (intel_crtc->pipe == 1) + intel_dp->DP |= DP_PIPEB_SELECT; + + if (is_cpu_edp(intel_dp)) { + /* don't miss out required setting for eDP */ + intel_dp->DP |= DP_PLL_ENABLE; + if (adjusted_mode->clock < 200000) + intel_dp->DP |= DP_PLL_FREQ_160MHZ; + else + intel_dp->DP |= DP_PLL_FREQ_270MHZ; + } + } else { + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; } } -static void ironlake_wait_panel_off(struct intel_dp *intel_dp) +#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) + +#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) +#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +static void ironlake_wait_panel_status(struct intel_dp *intel_dp, + u32 mask, + u32 value) { - unsigned long off_time; - unsigned long delay; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; - DRM_DEBUG_KMS("Wait for panel power off time\n"); + 
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", + mask, value, + I915_READ(PCH_PP_STATUS), + I915_READ(PCH_PP_CONTROL)); - if (ironlake_edp_have_panel_power(intel_dp) || - ironlake_edp_have_panel_vdd(intel_dp)) - { - DRM_DEBUG_KMS("Panel still on, no delay needed\n"); - return; + if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { + DRM_ERROR("Panel status timeout: status %08x control %08x\n", + I915_READ(PCH_PP_STATUS), + I915_READ(PCH_PP_CONTROL)); } +} - off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); - if (time_after(jiffies, off_time)) { - DRM_DEBUG_KMS("Time already passed"); - return; - } - delay = jiffies_to_msecs(off_time - jiffies); - if (delay > intel_dp->panel_power_down_delay) - delay = intel_dp->panel_power_down_delay; - DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); - msleep(delay); +static void ironlake_wait_panel_on(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power on\n"); + ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); +} + +static void ironlake_wait_panel_off(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power off time\n"); + ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); +} + +static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power cycle\n"); + ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); +} + + +/* Read the current pp_control value, unlocking the register if it + * is locked + */ + +static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) +{ + u32 control = I915_READ(PCH_PP_CONTROL); + + control &= ~PANEL_UNLOCK_MASK; + control |= PANEL_UNLOCK_REGS; + return control; } static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) @@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) "eDP VDD already requested on\n"); intel_dp->want_panel_vdd = true; + if (ironlake_edp_have_panel_vdd(intel_dp)) { DRM_DEBUG_KMS("eDP VDD already on\n"); return; } - ironlake_wait_panel_off(intel_dp); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + if (!ironlake_edp_have_panel_power(intel_dp)) + ironlake_wait_panel_power_cycle(intel_dp); + + pp = ironlake_get_pp_control(dev_priv); pp |= EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) u32 pp; if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_FORCE_VDD; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) /* Make sure sequencer is idle before allowing subsequent activity */ DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); - intel_dp->panel_off_jiffies = jiffies; + + msleep(intel_dp->panel_power_down_delay); } } @@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work) struct intel_dp, panel_vdd_work); struct drm_device *dev = intel_dp->base.base.dev; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev->mode_config.mutex); ironlake_panel_vdd_off_sync(intel_dp); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->mode_config.mutex); } 
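The panel-power rework above replaces the old jiffies bookkeeping with a single mask/value poll of PCH_PP_STATUS (ironlake_wait_panel_status(), built on _wait_for() with a 5 second budget polled in small steps, and the IDLE_ON/IDLE_OFF/IDLE_CYCLE mask/value pairs). The following is a minimal, self-contained sketch of that polling pattern only; read_status() and wait_panel_status() are illustrative stand-ins, not the i915 implementation or register model.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Stand-in for reading a status register such as PCH_PP_STATUS. */
	static uint32_t read_status(void)
	{
		static uint32_t state;
		/* Pretend the power sequencer settles after a few polls. */
		if (state < 0x80000000u)
			state += 0x20000000u;
		return state;
	}

	/*
	 * Poll until (status & mask) == value, or give up after timeout_ms.
	 * This mirrors the shape of the patch's wait helper: the caller
	 * supplies a mask/value pair describing the desired idle state.
	 */
	static bool wait_panel_status(uint32_t mask, uint32_t value,
				      unsigned int timeout_ms,
				      unsigned int step_ms)
	{
		unsigned int waited = 0;

		while ((read_status() & mask) != value) {
			if (waited >= timeout_ms)
				return false;	/* caller logs a timeout */
			usleep(step_ms * 1000);
			waited += step_ms;
		}
		return true;
	}

	int main(void)
	{
		/* 0x80000000 stands in for "panel on, sequencer idle". */
		if (!wait_panel_status(0x80000000u, 0x80000000u, 5000, 10))
			fprintf(stderr, "panel status timeout\n");
		return 0;
	}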
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) @@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); - + intel_dp->want_panel_vdd = false; if (sync) { @@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) } } -/* Returns true if the panel was already on when called */ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; + u32 pp; if (!is_edp(intel_dp)) return; - if (ironlake_edp_have_panel_power(intel_dp)) + + DRM_DEBUG_KMS("Turn eDP power on\n"); + + if (ironlake_edp_have_panel_power(intel_dp)) { + DRM_DEBUG_KMS("eDP power already on\n"); return; + } - ironlake_wait_panel_off(intel_dp); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + ironlake_wait_panel_power_cycle(intel_dp); + pp = ironlake_get_pp_control(dev_priv); if (IS_GEN5(dev)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; @@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) } pp |= POWER_TARGET_ON; + if (!IS_GEN5(dev)) + pp |= PANEL_POWER_RESET; + I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, - 5000)) - DRM_ERROR("panel on wait timed out: 0x%08x\n", - I915_READ(PCH_PP_STATUS)); + ironlake_wait_panel_on(intel_dp); if (IS_GEN5(dev)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ @@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp) } } -static void ironlake_edp_panel_off(struct drm_encoder *encoder) +static void ironlake_edp_panel_off(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = encoder->dev; + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | - PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; + u32 pp; if (!is_edp(intel_dp)) return; - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; - if (IS_GEN5(dev)) { - /* ILK workaround: disable reset around power sequence */ - pp &= ~PANEL_POWER_RESET; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - } + DRM_DEBUG_KMS("Turn eDP power off\n"); - intel_dp->panel_off_jiffies = jiffies; + WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n"); - if (IS_GEN5(dev)) { - pp &= ~POWER_TARGET_ON; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - pp &= ~POWER_TARGET_ON; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - msleep(intel_dp->panel_power_cycle_delay); - - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) - DRM_ERROR("panel off wait timed out: 0x%08x\n", - I915_READ(PCH_PP_STATUS)); + pp = ironlake_get_pp_control(dev_priv); + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); + I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); - pp |= PANEL_POWER_RESET; /* restore panel reset bit */ - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - } + ironlake_wait_panel_off(intel_dp); } static void 
ironlake_edp_backlight_on(struct intel_dp *intel_dp) @@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) * allowing it to appear. */ msleep(intel_dp->backlight_on_delay); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + pp = ironlake_get_pp_control(dev_priv); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) return; DRM_DEBUG_KMS("\n"); - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - pp |= PANEL_UNLOCK_REGS; + pp = ironlake_get_pp_control(dev_priv); pp &= ~EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + ironlake_edp_backlight_off(intel_dp); + ironlake_edp_panel_off(intel_dp); + /* Wake up the sink first */ ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_link_down(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); /* Make sure the panel is off before trying to * change the mode */ - ironlake_edp_backlight_off(intel_dp); - intel_dp_link_down(intel_dp); - ironlake_edp_panel_off(encoder); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder) intel_dp_start_link_train(intel_dp); ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); - intel_dp_complete_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); @@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { + ironlake_edp_backlight_off(intel_dp); + ironlake_edp_panel_off(intel_dp); + ironlake_edp_panel_vdd_on(intel_dp); - if (is_edp(intel_dp)) - ironlake_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, mode); intel_dp_link_down(intel_dp); - ironlake_edp_panel_off(encoder); - if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) - ironlake_edp_pll_off(encoder); ironlake_edp_panel_vdd_off(intel_dp, false); + + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_off(encoder); } else { + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, mode); if (!(dp_reg & DP_PORT_EN)) { @@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); - ironlake_edp_backlight_on(intel_dp); } else ironlake_edp_panel_vdd_off(intel_dp, false); ironlake_edp_backlight_on(intel_dp); @@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, * link status information */ static bool -intel_dp_get_link_status(struct intel_dp *intel_dp) +intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { return intel_dp_aux_native_read_retry(intel_dp, DP_LANE0_1_STATUS, - intel_dp->link_status, + link_status, DP_LINK_STATUS_SIZE); } @@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], } static uint8_t -intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], +intel_get_adjust_request_voltage(uint8_t adjust_request[2], int lane) { - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); int s = ((lane & 1) ? 
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); - uint8_t l = intel_dp_link_status(link_status, i); + uint8_t l = adjust_request[lane>>1]; return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; } static uint8_t -intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], +intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], int lane) { - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); int s = ((lane & 1) ? DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); - uint8_t l = intel_dp_link_status(link_status, i); + uint8_t l = adjust_request[lane>>1]; return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; } @@ -1344,6 +1376,7 @@ static char *link_train_names[] = { * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB */ #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 +#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200 static uint8_t intel_dp_pre_emphasis_max(uint8_t voltage_swing) @@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing) } static void -intel_get_adjust_train(struct intel_dp *intel_dp) +intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { + struct drm_device *dev = intel_dp->base.base.dev; uint8_t v = 0; uint8_t p = 0; int lane; + uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); + int voltage_max; for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); - uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); + uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); + uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); if (this_v > v) v = this_v; @@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp) p = this_p; } - if (v >= I830_DP_VOLTAGE_MAX) - v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) + voltage_max = I830_DP_VOLTAGE_MAX_CPT; + else + voltage_max = I830_DP_VOLTAGE_MAX; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; if (p >= intel_dp_pre_emphasis_max(v)) p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; @@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp) } static uint32_t -intel_dp_signal_levels(uint8_t train_set, int lane_count) +intel_dp_signal_levels(uint8_t train_set) { uint32_t signal_levels = 0; @@ -1458,9 +1498,8 @@ static uint8_t intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane) { - int i = DP_LANE0_1_STATUS + (lane >> 1); int s = (lane & 1) * 4; - uint8_t l = intel_dp_link_status(link_status, i); + uint8_t l = link_status[lane>>1]; return (l >> s) & 0xf; } @@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count DP_LANE_CHANNEL_EQ_DONE|\ DP_LANE_SYMBOL_LOCKED) static bool -intel_channel_eq_ok(struct intel_dp *intel_dp) +intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) { uint8_t lane_align; uint8_t lane_status; int lane; - lane_align = intel_dp_link_status(intel_dp->link_status, + lane_align = intel_dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) return false; for (lane = 0; lane < intel_dp->lane_count; lane++) { - lane_status = intel_get_lane_status(intel_dp->link_status, lane); + lane_status = 
intel_get_lane_status(link_status, lane); if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) return false; } @@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, - intel_dp->train_set, 4); - if (ret != 4) + intel_dp->train_set, + intel_dp->lane_count); + if (ret != intel_dp->lane_count) return false; return true; @@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) int i; uint8_t voltage; bool clock_recovery = false; - int tries; + int voltage_tries, loop_tries; u32 reg; uint32_t DP = intel_dp->DP; @@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP &= ~DP_LINK_TRAIN_MASK; memset(intel_dp->train_set, 0, 4); voltage = 0xff; - tries = 0; + voltage_tries = 0; + loop_tries = 0; clock_recovery = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ + uint8_t link_status[DP_LINK_STATUS_SIZE]; uint32_t signal_levels; - if (IS_GEN6(dev) && is_edp(intel_dp)) { + + if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); + DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) /* Set training pattern 1 */ udelay(100); - if (!intel_dp_get_link_status(intel_dp)) + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); break; + } - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("clock recovery OK\n"); clock_recovery = true; break; } @@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (i = 0; i < intel_dp->lane_count; i++) if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; - if (i == intel_dp->lane_count) - break; + if (i == intel_dp->lane_count) { + ++loop_tries; + if (loop_tries == 5) { + DRM_DEBUG_KMS("too many full retries, give up\n"); + break; + } + memset(intel_dp->train_set, 0, 4); + voltage_tries = 0; + continue; + } /* Check to see if we've tried the same voltage 5 times */ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) + ++voltage_tries; + if (voltage_tries == 5) { + DRM_DEBUG_KMS("too many voltage retries, give up\n"); break; + } } else - tries = 0; + voltage_tries = 0; voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); + intel_get_adjust_train(intel_dp, link_status); } intel_dp->DP = DP; @@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; + uint8_t link_status[DP_LINK_STATUS_SIZE]; if (cr_tries > 5) { DRM_ERROR("failed to train DP, aborting\n"); @@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) break; } - if (IS_GEN6(dev) && is_edp(intel_dp)) { + if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { signal_levels = 
intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) break; udelay(400); - if (!intel_dp_get_link_status(intel_dp)) + if (!intel_dp_get_link_status(intel_dp, link_status)) break; /* Make sure clock is still ok */ - if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); cr_tries++; continue; } - if (intel_channel_eq_ok(intel_dp)) { + if (intel_channel_eq_ok(intel_dp, link_status)) { channel_eq = true; break; } @@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) } /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); + intel_get_adjust_train(intel_dp, link_status); ++tries; } @@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp) msleep(17); - if (is_edp(intel_dp)) - DP |= DP_LINK_TRAIN_OFF; + if (is_edp(intel_dp)) { + if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) + DP |= DP_LINK_TRAIN_OFF_CPT; + else + DP |= DP_LINK_TRAIN_OFF; + } if (!HAS_PCH_CPT(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { @@ -1822,6 +1884,7 @@ static void intel_dp_check_link_status(struct intel_dp *intel_dp) { u8 sink_irq_vector; + u8 link_status[DP_LINK_STATUS_SIZE]; if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) return; /* Try to read receiver status if the link appears to be up */ - if (!intel_dp_get_link_status(intel_dp)) { + if (!intel_dp_get_link_status(intel_dp, link_status)) { intel_dp_link_down(intel_dp); return; } @@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } - if (!intel_channel_eq_ok(intel_dp)) { + if (!intel_channel_eq_ok(intel_dp, link_status)) { DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", drm_get_encoder_name(&intel_dp->base.base)); intel_dp_start_link_train(intel_dp); @@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc) continue; intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || + intel_dp->base.type == INTEL_OUTPUT_EDP) return intel_dp->output_reg; } @@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> PANEL_LIGHT_ON_DELAY_SHIFT; - + cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> PANEL_LIGHT_OFF_DELAY_SHIFT; @@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); - intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay; - ironlake_edp_panel_vdd_on(intel_dp); ret = intel_dp_get_dpcd(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, false); + if (ret) { if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) dev_priv->no_aux_handshake = diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 499d4c0dbeeb..21f60b7d69a3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ 
b/drivers/gpu/drm/i915/intel_panel.c @@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd) static int intel_panel_get_brightness(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); - return intel_panel_get_backlight(dev); + struct drm_i915_private *dev_priv = dev->dev_private; + return dev_priv->backlight_level; } static const struct backlight_ops intel_panel_bl_ops = { diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 032a82098136..5fc201b49d30 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -640,10 +640,9 @@ static int nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t reg0 = nv_rd32(dev, reg + 0); - uint32_t reg1 = nv_rd32(dev, reg + 4); struct nouveau_pll_vals pll; struct pll_lims pll_limits; + u32 ctrl, mask, coef; int ret; ret = get_pll_limits(dev, reg, &pll_limits); @@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) if (!clk) return -ERANGE; - reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); - reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; - - if (dev_priv->vbios.execute) { - still_alive(); - nv_wr32(dev, reg + 4, reg1); - nv_wr32(dev, reg + 0, reg0); + coef = pll.N1 << 8 | pll.M1; + ctrl = pll.log2P << 16; + mask = 0x00070000; + if (reg == 0x004008) { + mask |= 0x01f80000; + ctrl |= (pll_limits.log2p_bias << 19); + ctrl |= (pll.log2P << 22); } + if (!dev_priv->vbios.execute) + return 0; + + nv_mask(dev, reg + 0, mask, ctrl); + nv_wr32(dev, reg + 4, coef); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 7226f419e178..7cc37e690860 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) if (dev_priv->card_type == NV_10 && nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && - nvbo->bo.mem.num_pages < vram_pages / 2) { + nvbo->bo.mem.num_pages < vram_pages / 4) { /* * Make sure that the color and depth buffers are handled * by independent memory controller units. 
Up to a 9x diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a319d5646ea9..bb6ec9ef8676 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, INIT_LIST_HEAD(&chan->nvsw.vbl_wait); INIT_LIST_HEAD(&chan->nvsw.flip); INIT_LIST_HEAD(&chan->fence.pending); + spin_lock_init(&chan->fence.lock); /* setup channel's memory and vm */ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index e0d275e1c96c..cea6696b1906 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, case OUTPUT_DP: max_clock = nv_encoder->dp.link_nr; max_clock *= nv_encoder->dp.link_bw; - clock = clock * nouveau_connector_bpp(connector) / 8; + clock = clock * nouveau_connector_bpp(connector) / 10; break; default: BUG_ON(1); diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index ddbabefb4273..b12fd2c80812 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, spin_unlock_irqrestore(&dev->event_lock, flags); return 0; } + +int +nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct nouveau_bo *bo; + int ret; + + args->pitch = roundup(args->width * (args->bpp / 8), 256); + args->size = args->pitch * args->height; + args->size = roundup(args->size, PAGE_SIZE); + + ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo); + if (ret) + return ret; + + ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle); + drm_gem_object_unreference_unlocked(bo->gem); + return ret; +} + +int +nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle) +{ + return drm_gem_handle_delete(file_priv, handle); +} + +int +nouveau_display_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *poffset) +{ + struct drm_gem_object *gem; + + gem = drm_gem_object_lookup(dev, file_priv, handle); + if (gem) { + struct nouveau_bo *bo = gem->driver_private; + *poffset = bo->bo.addr_space_offset; + drm_gem_object_unreference_unlocked(gem); + return 0; + } + + return -ENOENT; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 9f7bb1295262..9791d13c9e3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -433,6 +433,10 @@ static struct drm_driver driver = { .gem_open_object = nouveau_gem_object_open, .gem_close_object = nouveau_gem_object_close, + .dumb_create = nouveau_display_dumb_create, + .dumb_map_offset = nouveau_display_dumb_map_offset, + .dumb_destroy = nouveau_display_dumb_destroy, + .name = DRIVER_NAME, .desc = DRIVER_DESC, #ifdef GIT_REVISION diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 29837da1098b..4c0be3a4ed88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct 
drm_pending_vblank_event *event); int nouveau_finish_page_flip(struct nouveau_channel *, struct nouveau_page_flip_state *); +int nouveau_display_dumb_create(struct drm_file *, struct drm_device *, + struct drm_mode_create_dumb *args); +int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *, + uint32_t handle, uint64_t *offset); +int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *, + uint32_t handle); /* nv10_gpio.c */ int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 14a8627efe4d..3a4cc32b9e44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fbdev *nfbdev; + int preferred_bpp; int ret; nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); @@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev) } drm_fb_helper_single_add_all_connectors(&nfbdev->helper); - drm_fb_helper_initial_config(&nfbdev->helper, 32); + + if (dev_priv->vram_size <= 32 * 1024 * 1024) + preferred_bpp = 8; + else if (dev_priv->vram_size <= 64 * 1024 * 1024) + preferred_bpp = 16; + else + preferred_bpp = 32; + + drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 81116cfea275..2f6daae68b9d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) return ret; } - INIT_LIST_HEAD(&chan->fence.pending); - spin_lock_init(&chan->fence.lock); atomic_set(&chan->fence.last_sequence_irq, 0); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index c6143df48b9f..d39b2202b197 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what, NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); - for (i = 0; info[i].addr; i++) { + for (i = 0; i2c && info[i].addr; i++) { if (nouveau_probe_i2c_addr(i2c, info[i].addr) && (!match || match(i2c, &info[i]))) { NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 02222c540aee..960c0ae0c0c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) return ret; } - ret = drm_mm_init(&chan->ramin_heap, base, size); + ret = drm_mm_init(&chan->ramin_heap, base, size - base); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); nouveau_gpuobj_ref(NULL, &chan->ramin); diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 9f178aa94162..33d03fbf00df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c @@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev) if(version == 0x15) { memtimings->timing = kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); - if(!memtimings) { + if (!memtimings->timing) { NV_WARN(dev,"Could not allocate memtiming table\n"); return; } diff --git 
a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index b75258a9fe44..c8a463b76c89 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be) pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); } + nvbe->unmap_pages = false; } + + nvbe->pages = NULL; } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 82478e0998e5..d8831ab42bb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_display_early; + /* workaround an odd issue on nvc1 by disabling the device's + * nosnoop capability. hopefully won't cause issues until a + * better fix is found - assuming there is one... + */ + if (dev_priv->chipset == 0xc1) { + nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); + } + nouveau_pm_init(dev); ret = engine->vram.init(dev); @@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->noaccel = !!nouveau_noaccel; if (nouveau_noaccel == -1) { switch (dev_priv->chipset) { - case 0xc1: /* known broken */ - case 0xc8: /* never tested */ +#if 0 + case 0xXX: /* known broken */ NV_INFO(dev, "acceleration disabled by default, pass " "noaccel=0 to force enable\n"); dev_priv->noaccel = true; break; +#endif default: dev_priv->noaccel = false; break; diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index bbc0b9c7e1f7..e676b0d53478 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c @@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg) int P = (ctrl & 0x00070000) >> 16; u32 ref = 27000, clk = 0; - if (ctrl & 0x80000000) + if ((ctrl & 0x80000000) && M1) { clk = ref * N1 / M1; - - if (!(ctrl & 0x00000100)) { - if (ctrl & 0x40000000) - clk = clk * N2 / M2; + if ((ctrl & 0x40000100) == 0x40000000) { + if (M2) + clk = clk * N2 / M2; + else + clk = 0; + } } return clk >> P; @@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) } /* memory clock */ + if (!perflvl->memory) { + info->mpll_ctrl = 0x00000000; + goto out; + } + ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, &N1, &M1, &N2, &M2, &log2P); if (ret < 0) @@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) mdelay(5); nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); + if (!info->mpll_ctrl) + goto resume; + /* wait for vblank start on active crtcs, disable memory access */ for (i = 0; i < 2; i++) { if (!(crtc_mask & (1 << i))) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index d23ca00e7d62..06de250fe617 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_display *disp = nv50_display(dev); u32 unk30 = nv_rd32(dev, 0x610030), mc; - int i, crtc, or, type = OUTPUT_ANY; + int i, crtc, or = 0, type = OUTPUT_ANY; NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); disp->irq.dcb = NULL; @@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev) struct nv50_display *disp = nv50_display(dev); u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; struct dcb_entry *dcb; - int i, crtc, 
or, type = OUTPUT_ANY; + int i, crtc, or = 0, type = OUTPUT_ANY; NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); dcb = disp->irq.dcb; diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8c979b31ff61..ac601f7c4e1a 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine) NV_DEBUG(dev, "\n"); /* master reset */ - nv_mask(dev, 0x000200, 0x00200100, 0x00000000); - nv_mask(dev, 0x000200, 0x00200100, 0x00200100); + nv_mask(dev, 0x000200, 0x00201000, 0x00000000); + nv_mask(dev, 0x000200, 0x00201000, 0x00201000); nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ /* reset/enable traps and interrupts */ diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index d05c2c3b2444..4b46d6968566 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) gr_def(ctx, offset + 0x1c, 0x00880000); break; case 0x86: - gr_def(ctx, offset + 0x1c, 0x008c0000); + gr_def(ctx, offset + 0x1c, 0x018c0000); break; case 0x92: case 0x96: diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index 9da23838e63e..2e45e57fd869 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c @@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev) colbits = (r4 & 0x0000f000) >> 12; rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; - banks = ((r4 & 0x01000000) ? 8 : 4); + banks = 1 << (((r4 & 0x03000000) >> 24) + 2); rowsize = parts * banks * (1 << colbits) * 8; predicted = rowsize << rowbitsa; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index bbdbc51830c8..ecfafd70cf0e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; int i = 0, gpc, tp, ret; - u32 magic; ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, &grch->unk408004); @@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) nv_wo32(grch->mmio, i++ * 4, 0x0041880c); nv_wo32(grch->mmio, i++ * 4, 0x80000018); - magic = 0x02180000; - nv_wo32(grch->mmio, i++ * 4, 0x00405830); - nv_wo32(grch->mmio, i++ * 4, magic); - for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { - u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); - nv_wo32(grch->mmio, i++ * 4, reg); - nv_wo32(grch->mmio, i++ * 4, magic); + if (dev_priv->chipset != 0xc1) { + u32 magic = 0x02180000; + nv_wo32(grch->mmio, i++ * 4, 0x00405830); + nv_wo32(grch->mmio, i++ * 4, magic); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + u32 reg = TP_UNIT(gpc, tp, 0x520); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, magic); + magic += 0x0324; + } + } + } else { + u32 magic = 0x02180000; + nv_wo32(grch->mmio, i++ * 4, 0x00405830); + nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218); + nv_wo32(grch->mmio, i++ * 4, 0x004064c4); + nv_wo32(grch->mmio, i++ * 4, 0x0086ffff); + for (gpc = 
0; gpc < priv->gpc_nr; gpc++) { + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + u32 reg = TP_UNIT(gpc, tp, 0x520); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic); + magic += 0x0324; + } + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + u32 reg = TP_UNIT(gpc, tp, 0x544); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, magic); + magic += 0x0324; + } } } @@ -358,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev) u8 tpnr[GPC_MAX]; int i, gpc, tpc; + nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */ + /* * TP ROP UNKVAL(magic_not_rop_nr) * 450: 4/0/0/0 2 3 diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index dd0e6a736b3b..96b0b93d94ca 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) /* calculate first set of magics */ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); + gpc = -1; for (tp = 0; tp < priv->tp_total; tp++) { do { gpc = (gpc + 1) % priv->gpc_nr; @@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan) if (1) { u32 tp_mask = 0, tp_set = 0; - u8 tpnr[GPC_MAX]; + u8 tpnr[GPC_MAX], a, b; memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); for (gpc = 0; gpc < priv->gpc_nr; gpc++) tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); - gpc = -1; - for (i = 0, gpc = -1; i < 32; i++) { - int ltp = i * (priv->tp_total - 1) / 32; - - do { - gpc = (gpc + 1) % priv->gpc_nr; - } while (!tpnr[gpc]); - tp = priv->tp_nr[gpc] - tpnr[gpc]--; + for (i = 0, gpc = -1, b = -1; i < 32; i++) { + a = (i * (priv->tp_total - 1)) / 32; + if (a != b) { + b = a; + do { + gpc = (gpc + 1) % priv->gpc_nr; + } while (!tpnr[gpc]); + tp = priv->tp_nr[gpc] - tpnr[gpc]--; - tp_set |= 1 << ((gpc * 8) + tp); + tp_set |= 1 << ((gpc * 8) + tp); + } - do { - nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); - tp_set ^= tp_mask; - nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set); - tp_set ^= tp_mask; - } while (ltp == (++i * (priv->tp_total - 1) / 32)); - i--; + nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); + nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask); } } diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index edbfe9360ae2..ce984d573a51 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c @@ -43,7 +43,7 @@ static const u8 types[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, - 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, + 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 @@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev) u32 bsize = nv_rd32(dev, 0x10f20c); u32 offset, length; bool uniform = true; - int ret, i; + int ret, part; NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); /* read amount of vram attached to each memory controller */ - for (i = 0; i < parts; i++) { - u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); + part = 0; + while (parts) { + u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000)); + if (psize == 0) + continue; + parts--; + if (psize != bsize) { if (psize < bsize) bsize = psize; uniform = false; } - NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, 
psize); - + NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); dev_priv->vram_size += (u64)psize << 20; } diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index 23d63b4b3d77..cb006a718e70 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c @@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode) continue; if (nv_partner != nv_encoder && - nv_partner->dcb->or == nv_encoder->or) { + nv_partner->dcb->or == nv_encoder->dcb->or) { if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) return; break; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 87921c88a95c..2b97262e3ab1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - if (tiling_flags & RADEON_TILING_MACRO) + if (tiling_flags & RADEON_TILING_MACRO) { + if (rdev->family >= CHIP_CAYMAN) + tmp = rdev->config.cayman.tile_config; + else + tmp = rdev->config.evergreen.tile_config; + + switch ((tmp & 0xf0) >> 4) { + case 0: /* 4 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); + break; + case 1: /* 8 banks */ + default: + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); + break; + case 2: /* 16 banks */ + fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); + break; + } + + switch ((tmp & 0xf000) >> 12) { + case 0: /* 1KB rows */ + default: + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB); + break; + case 1: /* 2KB rows */ + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB); + break; + case 2: /* 4KB rows */ + fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB); + break; + } + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); - else if (tiling_flags & RADEON_TILING_MICRO) + } else if (tiling_flags & RADEON_TILING_MICRO) fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); switch (radeon_crtc->crtc_id) { @@ -1522,12 +1553,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - - /* adjust pm to upcoming mode change */ - radeon_pm_compute_clocks(rdev); - if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index a0de48542f71..6fb335a4fdda 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } } - DRM_ERROR("aux i2c too many retries, giving up\n"); + DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); return -EREMOTEIO; } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e4c384b9511c..5e00d1670aa9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= EVERGREEN_GRPH_UPDATE_LOCK; @@ -99,7 +100,11 @@ 
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ @@ -157,6 +162,57 @@ int sumo_get_temp(struct radeon_device *rdev) return actual_temp * 1000; } +void sumo_pm_init_profile(struct radeon_device *rdev) +{ + int idx; + + /* default */ + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; + + /* low,mid sh/mh */ + if (rdev->flags & RADEON_IS_MOBILITY) + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + else + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; + + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; + + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; + + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; + + /* high sh/mh */ + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = + rdev->pm.power_state[idx].num_clock_modes - 1; + + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = + rdev->pm.power_state[idx].num_clock_modes - 1; +} + void evergreen_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; @@ -1219,7 +1275,7 @@ void evergreen_mc_program(struct radeon_device *rdev) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); } - WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); if (rdev->flags & RADEON_IS_IGP) { tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 
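The evergreen_cs changes in the next file derive the CS checker's pipe, bank, group-size and row-size parameters from the packed tile_config word rather than from precomputed fields. The stand-alone decoder below restates that mapping outside the driver; struct tile_info and decode_tile_config() are illustrative names, not radeon APIs, and the bit layout is taken directly from the switch statements in the hunk that follows.

	#include <stdint.h>
	#include <stdio.h>

	/* Decoded view of the packed tile_config word: nibble 0 selects the
	 * pipe count, nibble 1 the bank count, nibble 2 the group size and
	 * nibble 3 the row size, matching the patch's switch statements. */
	struct tile_info {
		unsigned npipes;
		unsigned nbanks;
		unsigned group_size;	/* bytes */
		unsigned row_size;	/* KiB */
	};

	static struct tile_info decode_tile_config(uint32_t tmp)
	{
		struct tile_info t;

		switch (tmp & 0xf) {
		case 0:  t.npipes = 1; break;
		case 2:  t.npipes = 4; break;
		case 3:  t.npipes = 8; break;
		case 1:
		default: t.npipes = 2; break;
		}

		switch ((tmp & 0xf0) >> 4) {
		case 0:  t.nbanks = 4;  break;
		case 2:  t.nbanks = 16; break;
		case 1:
		default: t.nbanks = 8;  break;
		}

		t.group_size = ((tmp & 0xf00) >> 8) == 0 ? 256 : 512;

		switch ((tmp & 0xf000) >> 12) {
		case 0:  t.row_size = 1; break;
		case 2:  t.row_size = 4; break;
		case 1:
		default: t.row_size = 2; break;
		}
		return t;
	}

	int main(void)
	{
		/* Example word: 2 pipes, 8 banks, 512-byte group, 2 KiB rows. */
		struct tile_info t = decode_tile_config(0x1211);

		printf("pipes=%u banks=%u group=%u row=%uKiB\n",
		       t.npipes, t.nbanks, t.group_size, t.row_size);
		return 0;
	}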
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7fdfa8ea7570..cd4590aae154 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -38,6 +38,7 @@ struct evergreen_cs_track { u32 group_size; u32 nbanks; u32 npipes; + u32 row_size; /* value we track */ u32 nsamples; u32 cb_color_base_last[12]; @@ -77,6 +78,44 @@ struct evergreen_cs_track { struct radeon_bo *db_s_write_bo; }; +static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) +{ + if (tiling_flags & RADEON_TILING_MACRO) + return ARRAY_2D_TILED_THIN1; + else if (tiling_flags & RADEON_TILING_MICRO) + return ARRAY_1D_TILED_THIN1; + else + return ARRAY_LINEAR_GENERAL; +} + +static u32 evergreen_cs_get_num_banks(u32 nbanks) +{ + switch (nbanks) { + case 2: + return ADDR_SURF_2_BANK; + case 4: + return ADDR_SURF_4_BANK; + case 8: + default: + return ADDR_SURF_8_BANK; + case 16: + return ADDR_SURF_16_BANK; + } +} + +static u32 evergreen_cs_get_tile_split(u32 row_size) +{ + switch (row_size) { + case 1: + default: + return ADDR_SURF_TILE_SPLIT_1KB; + case 2: + return ADDR_SURF_TILE_SPLIT_2KB; + case 4: + return ADDR_SURF_TILE_SPLIT_4KB; + } +} + static void evergreen_cs_track_init(struct evergreen_cs_track *track) { int i; @@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) } break; case DB_Z_INFO: - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } track->db_z_info = radeon_get_ib_value(p, idx); - ib[idx] &= ~Z_ARRAY_MODE(0xf); - track->db_z_info &= ~Z_ARRAY_MODE(0xf); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else { - ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] &= ~Z_ARRAY_MODE(0xf); + track->db_z_info &= ~Z_ARRAY_MODE(0xf); + ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + } } break; case DB_STENCIL_INFO: @@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR5_INFO: case CB_COLOR6_INFO: case CB_COLOR7_INFO: - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } tmp = (reg - CB_COLOR0_INFO) / 0x3c; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] |= 
CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } break; case CB_COLOR8_INFO: case CB_COLOR9_INFO: case CB_COLOR10_INFO: case CB_COLOR11_INFO: - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); } break; case CB_COLOR0_PITCH: @@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR9_ATTRIB: case CB_COLOR10_ATTRIB: case CB_COLOR11_ATTRIB: + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + } break; case CB_COLOR0_DIM: case CB_COLOR1_DIM: @@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + ib[idx+1+(i*8)+1] |= + TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags)); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx+1+(i*8)+6] |= + TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size)); + ib[idx+1+(i*8)+7] |= + TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks)); + } + } texture = reloc->robj; /* tex mip base */ r = evergreen_cs_packet_next_reloc(p, &reloc); @@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; struct evergreen_cs_track *track; + u32 tmp; int r; if (p->track == NULL) { @@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) if (track == NULL) return -ENOMEM; evergreen_cs_track_init(track); - track->npipes = p->rdev->config.evergreen.tiling_npipes; - track->nbanks = p->rdev->config.evergreen.tiling_nbanks; - track->group_size = p->rdev->config.evergreen.tiling_group_size; + if (p->rdev->family >= CHIP_CAYMAN) + tmp = p->rdev->config.cayman.tile_config; + else + tmp = p->rdev->config.evergreen.tile_config; + + switch (tmp & 0xf) { + case 0: + track->npipes = 1; + break; + case 1: + default: + track->npipes = 2; + break; + case 2: + track->npipes = 4; + break; + case 3: + track->npipes = 8; + break; + } + + switch ((tmp & 0xf0) >> 4) { 
+ case 0: + track->nbanks = 4; + break; + case 1: + default: + track->nbanks = 8; + break; + case 2: + track->nbanks = 16; + break; + } + + switch ((tmp & 0xf00) >> 8) { + case 0: + track->group_size = 256; + break; + case 1: + default: + track->group_size = 512; + break; + } + + switch ((tmp & 0xf000) >> 12) { + case 0: + track->row_size = 1; + break; + case 1: + default: + track->row_size = 2; + break; + case 2: + track->row_size = 4; + break; + } + p->track = track; } do { diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index c781c92c3451..7d7f2155e34c 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -42,6 +42,17 @@ # define EVERGREEN_GRPH_DEPTH_8BPP 0 # define EVERGREEN_GRPH_DEPTH_16BPP 1 # define EVERGREEN_GRPH_DEPTH_32BPP 2 +# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2) +# define EVERGREEN_ADDR_SURF_2_BANK 0 +# define EVERGREEN_ADDR_SURF_4_BANK 1 +# define EVERGREEN_ADDR_SURF_8_BANK 2 +# define EVERGREEN_ADDR_SURF_16_BANK 3 +# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4) +# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6) +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2 +# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3 # define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) /* 8 BPP */ # define EVERGREEN_GRPH_FORMAT_INDEXED 0 @@ -61,6 +72,24 @@ # define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 # define EVERGREEN_GRPH_FORMAT_RGB111110 6 # define EVERGREEN_GRPH_FORMAT_BGR101111 7 +# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11) +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2 +# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3 +# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13) +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5 +# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6 +# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18) +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2 +# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3 # define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) # define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 # define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b937c49054d9..e00039e59a75 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -899,6 +899,10 @@ #define DB_HTILE_DATA_BASE 0x28014 #define DB_Z_INFO 0x28040 # define Z_ARRAY_MODE(x) ((x) << 4) +# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8) +# define DB_NUM_BANKS(x) (((x) & 0x3) << 12) +# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16) +# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20) #define DB_STENCIL_INFO 0x28044 #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c @@ -951,6 +955,29 @@ # define CB_SF_EXPORT_FULL 0 # define CB_SF_EXPORT_NORM 1 #define CB_COLOR0_ATTRIB 0x28c74 +# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5) +# define ADDR_SURF_TILE_SPLIT_64B 0 +# define ADDR_SURF_TILE_SPLIT_128B 1 +# define ADDR_SURF_TILE_SPLIT_256B 2 +# define 
ADDR_SURF_TILE_SPLIT_512B 3 +# define ADDR_SURF_TILE_SPLIT_1KB 4 +# define ADDR_SURF_TILE_SPLIT_2KB 5 +# define ADDR_SURF_TILE_SPLIT_4KB 6 +# define CB_NUM_BANKS(x) (((x) & 0x3) << 10) +# define ADDR_SURF_2_BANK 0 +# define ADDR_SURF_4_BANK 1 +# define ADDR_SURF_8_BANK 2 +# define ADDR_SURF_16_BANK 3 +# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13) +# define ADDR_SURF_BANK_WIDTH_1 0 +# define ADDR_SURF_BANK_WIDTH_2 1 +# define ADDR_SURF_BANK_WIDTH_4 2 +# define ADDR_SURF_BANK_WIDTH_8 3 +# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16) +# define ADDR_SURF_BANK_HEIGHT_1 0 +# define ADDR_SURF_BANK_HEIGHT_2 1 +# define ADDR_SURF_BANK_HEIGHT_4 2 +# define ADDR_SURF_BANK_HEIGHT_8 3 #define CB_COLOR0_DIM 0x28c78 /* only CB0-7 blocks have these regs */ #define CB_COLOR0_CMASK 0x28c7c @@ -1137,7 +1164,11 @@ # define SQ_SEL_1 5 #define SQ_TEX_RESOURCE_WORD5_0 0x30014 #define SQ_TEX_RESOURCE_WORD6_0 0x30018 +# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29) #define SQ_TEX_RESOURCE_WORD7_0 0x3001c +# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8) +# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10) +# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16) #define SQ_VTX_CONSTANT_WORD0_0 0x30000 #define SQ_VTX_CONSTANT_WORD1_0 0x30004 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ad158ea49901..bfc08f6320f8 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; + int i; /* Lock the graphics update lock */ /* update the scanout addresses */ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); /* Wait for update_pending to go high. */ - while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 400b26df652a..c93bc64707e1 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p, return r; } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= R300_TXO_MACRO_TILE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_TXO_MICRO_TILE; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) - tile_flags |= R300_TXO_MICRO_TILE_SQUARE; - - tmp = idx_value + ((u32)reloc->lobj.gpu_offset); - tmp |= tile_flags; - ib[idx] = tmp; + if (p->keep_tiling_flags) { + ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ + ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); + } else { + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_TXO_MACRO_TILE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_TXO_MICRO_TILE; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_TXO_MICRO_TILE_SQUARE; + + tmp = idx_value + ((u32)reloc->lobj.gpu_offset); + tmp |= tile_flags; + ib[idx] = tmp; + } track->textures[i].robj = reloc->robj; track->tex_dirty = true; break; @@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_COLORPITCH1 */ /* RB3D_COLORPITCH2 */ /* RB3D_COLORPITCH3 */ - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); - r100_cs_dump_packet(p, pkt); - return r; - } + if (!p->keep_tiling_flags) { + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= R300_COLOR_TILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_COLOR_MICROTILE_ENABLE; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) - tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_COLOR_MICROTILE_ENABLE; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; - tmp = idx_value & ~(0x7 << 16); - tmp |= tile_flags; - ib[idx] = tmp; + tmp = idx_value & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + } i = (reg - 0x4E38) >> 2; track->cb[i].pitch = idx_value & 0x3FFE; switch (((idx_value >> 21) & 0xF)) { @@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4F24: /* ZB_DEPTHPITCH */ - r = r100_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); - r100_cs_dump_packet(p, pkt); - return r; - } - - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - tile_flags |= R300_DEPTHMACROTILE_ENABLE; - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_DEPTHMICROTILE_TILED; - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) - tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; + if (!p->keep_tiling_flags) { + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } - tmp = idx_value & ~(0x7 << 
16); - tmp |= tile_flags; - ib[idx] = tmp; + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_DEPTHMACROTILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_DEPTHMICROTILE_TILED; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; + tmp = idx_value & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + } track->zb.pitch = idx_value & 0x3FFC; track->zb_dirty = true; break; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 19afc43ad173..9cdda0b3b081 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) pcie_lanes); } -static int r600_pm_get_type_index(struct radeon_device *rdev, - enum radeon_pm_state_type ps_type, - int instance) -{ - int i; - int found_instance = -1; - - for (i = 0; i < rdev->pm.num_power_states; i++) { - if (rdev->pm.power_state[i].type == ps_type) { - found_instance++; - if (found_instance == instance) - return i; - } - } - /* return default if no match */ - return rdev->pm.default_power_state_index; -} - void rs780_pm_init_profile(struct radeon_device *rdev) { if (rdev->pm.num_power_states == 2) { @@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev) void r600_pm_init_profile(struct radeon_device *rdev) { + int idx; + if (rdev->family == CHIP_R600) { /* XXX */ /* default */ @@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; /* low sh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; - } else { - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; - } + if (rdev->flags & RADEON_IS_MOBILITY) + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); + else + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; /* mid sh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; - } else { - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - 
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; - } + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; /* high sh */ - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; /* low mh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; - } else { - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; - } + if (rdev->flags & RADEON_IS_MOBILITY) + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); + else + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; /* mid mh */ - if (rdev->flags & RADEON_IS_MOBILITY) { - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; - } else { - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; - } + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; + rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; /* high mh */ - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = - 
r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = - r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; + rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0a2e023c1557..cb1acffd2430 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->db_depth_control = radeon_get_ib_value(p, idx); break; case R_028010_DB_DEPTH_INFO: - if (r600_cs_packet_next_is_pkt3_nop(p)) { + if (!p->keep_tiling_flags && + r600_cs_packet_next_is_pkt3_nop(p)) { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_warn(p->dev, "bad SET_CONTEXT_REG " @@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_0280B4_CB_COLOR5_INFO: case R_0280B8_CB_COLOR6_INFO: case R_0280BC_CB_COLOR7_INFO: - if (r600_cs_packet_next_is_pkt3_nop(p)) { + if (!p->keep_tiling_flags && + r600_cs_packet_next_is_pkt3_nop(p)) { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); @@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, mip_offset <<= 8; word0 = radeon_get_ib_value(p, idx + 0); - if (tiling_flags & RADEON_TILING_MACRO) - word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); - else if (tiling_flags & RADEON_TILING_MICRO) - word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + if (tiling_flags & RADEON_TILING_MACRO) + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); + else if (tiling_flags & RADEON_TILING_MICRO) + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + } word1 = radeon_get_ib_value(p, idx + 1); w0 = G_038000_TEX_WIDTH(word0) + 1; h0 = G_038004_TEX_HEIGHT(word1) + 1; @@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) - ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + if (!p->keep_tiling_flags) { + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); + } texture = reloc->robj; /* tex mip base */ r = r600_cs_packet_next_reloc(p, &reloc); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b316b301152f..8227e76b5c70 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -611,7 +611,8 @@ struct radeon_cs_parser { struct radeon_ib *ib; void *track; unsigned family; - int parser_error; + int parser_error; + bool keep_tiling_flags; }; extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); @@ -784,8 +785,7 @@ struct radeon_pm_clock_info { struct 
radeon_power_state { enum radeon_pm_state_type type; - /* XXX: use a define for num clock modes */ - struct radeon_pm_clock_info clock_info[8]; + struct radeon_pm_clock_info *clock_info; /* number of valid clock modes in this power state */ int num_clock_modes; struct radeon_pm_clock_info *default_clock_mode; @@ -855,6 +855,9 @@ struct radeon_pm { struct device *int_hwmon_dev; }; +int radeon_pm_get_type_index(struct radeon_device *rdev, + enum radeon_pm_state_type ps_type, + int instance); /* * Benchmarking @@ -1142,6 +1145,48 @@ struct r600_vram_scratch { u64 gpu_addr; }; + +/* + * Mutex which allows recursive locking from the same process. + */ +struct radeon_mutex { + struct mutex mutex; + struct task_struct *owner; + int level; +}; + +static inline void radeon_mutex_init(struct radeon_mutex *mutex) +{ + mutex_init(&mutex->mutex); + mutex->owner = NULL; + mutex->level = 0; +} + +static inline void radeon_mutex_lock(struct radeon_mutex *mutex) +{ + if (mutex_trylock(&mutex->mutex)) { + /* The mutex was unlocked before, so it's ours now */ + mutex->owner = current; + } else if (mutex->owner != current) { + /* Another process locked the mutex, take it */ + mutex_lock(&mutex->mutex); + mutex->owner = current; + } + /* Otherwise the mutex was already locked by this process */ + + mutex->level++; +} + +static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) +{ + if (--mutex->level > 0) + return; + + mutex->owner = NULL; + mutex_unlock(&mutex->mutex); +} + + /* * Core structure, functions and helpers. */ @@ -1197,7 +1242,7 @@ struct radeon_device { struct radeon_gem gem; struct radeon_pm pm; uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; - struct mutex cs_mutex; + struct radeon_mutex cs_mutex; struct radeon_wb wb; struct radeon_dummy_page dummy_page; bool gpu_lockup; diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 3f6636bb2d7f..3516a6081dcf 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle) /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); + DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", + acpi_format_exception(status)); kfree(buffer.pointer); return 1; } @@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev) acpi_handle handle; int ret; - /* No need to proceed if we're sure that ATIF is not supported */ - if (!ASIC_IS_AVIVO(rdev) || !rdev->bios) - return 0; - /* Get the device handle */ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + /* No need to proceed if we're sure that ATIF is not supported */ + if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) + return 0; + /* Call the ATIF method */ ret = radeon_atif_call(handle); if (ret) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e2944566ffea..a2e1eae114ef 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = { .pm_misc = &evergreen_pm_misc, .pm_prepare = &evergreen_pm_prepare, .pm_finish = &evergreen_pm_finish, - .pm_init_profile = &rs780_pm_init_profile, + .pm_init_profile = &sumo_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, diff --git 
a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 85f14f0337e4..59914842a729 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev); +extern void sumo_pm_init_profile(struct radeon_device *rdev); extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 08d0b94332e6..d24baf30efcb 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -62,6 +62,87 @@ union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; +static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev, + ATOM_GPIO_I2C_ASSIGMENT *gpio, + u8 index) +{ + /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ + if ((rdev->family == CHIP_R420) || + (rdev->family == CHIP_R423) || + (rdev->family == CHIP_RV410)) { + if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { + gpio->ucClkMaskShift = 0x19; + gpio->ucDataMaskShift = 0x18; + } + } + + /* some evergreen boards have bad data for this entry */ + if (ASIC_IS_DCE4(rdev)) { + if ((index == 7) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && + (gpio->sucI2cId.ucAccess == 0)) { + gpio->sucI2cId.ucAccess = 0x97; + gpio->ucDataMaskShift = 8; + gpio->ucDataEnShift = 8; + gpio->ucDataY_Shift = 8; + gpio->ucDataA_Shift = 8; + } + } + + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((index == 4) && + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } +} + +static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) +{ + struct radeon_i2c_bus_rec i2c; + + memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); + + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; + i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; + i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; + i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; + i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; + i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; + i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; + i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; + i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); + i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); + i2c.en_clk_mask = (1 << gpio->ucClkEnShift); + i2c.en_data_mask = (1 << gpio->ucDataEnShift); + i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); + i2c.y_data_mask = (1 << gpio->ucDataY_Shift); + i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); + i2c.a_data_mask = (1 << gpio->ucDataA_Shift); + + if (gpio->sucI2cId.sbfAccess.bfHW_Capable) + i2c.hw_capable = true; + else + i2c.hw_capable = false; + + if (gpio->sucI2cId.ucAccess == 0xa0) + i2c.mm_i2c = true; + else + i2c.mm_i2c = 
false; + + i2c.i2c_id = gpio->sucI2cId.ucAccess; + + if (i2c.mask_clk_reg) + i2c.valid = true; + else + i2c.valid = false; + + return i2c; +} + static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, uint8_t id) { @@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; - /* some evergreen boards have bad data for this entry */ - if (ASIC_IS_DCE4(rdev)) { - if ((i == 7) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && - (gpio->sucI2cId.ucAccess == 0)) { - gpio->sucI2cId.ucAccess = 0x97; - gpio->ucDataMaskShift = 8; - gpio->ucDataEnShift = 8; - gpio->ucDataY_Shift = 8; - gpio->ucDataA_Shift = 8; - } - } - - /* some DCE3 boards have bad data for this entry */ - if (ASIC_IS_DCE3(rdev)) { - if ((i == 4) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && - (gpio->sucI2cId.ucAccess == 0x94)) - gpio->sucI2cId.ucAccess = 0x14; - } + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); if (gpio->sucI2cId.ucAccess == id) { - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); - i2c.en_data_mask = (1 << gpio->ucDataEnShift); - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); - - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) - i2c.hw_capable = true; - else - i2c.hw_capable = false; - - if (gpio->sucI2cId.ucAccess == 0xa0) - i2c.mm_i2c = true; - else - i2c.mm_i2c = false; - - i2c.i2c_id = gpio->sucI2cId.ucAccess; - - if (i2c.mask_clk_reg) - i2c.valid = true; + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); break; } } @@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) int i, num_indices; char stmp[32]; - memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); - if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); @@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev) for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; - i2c.valid = false; - - /* some evergreen boards have bad data for this entry */ - if (ASIC_IS_DCE4(rdev)) { - if ((i == 7) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && - (gpio->sucI2cId.ucAccess == 0)) { - gpio->sucI2cId.ucAccess = 0x97; - gpio->ucDataMaskShift = 8; - gpio->ucDataEnShift = 8; - gpio->ucDataY_Shift = 8; - gpio->ucDataA_Shift = 8; - } - } - /* some DCE3 boards have bad data for this entry */ - if (ASIC_IS_DCE3(rdev)) { - if ((i == 4) && - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && - (gpio->sucI2cId.ucAccess == 0x94)) - gpio->sucI2cId.ucAccess = 0x14; - } + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; - i2c.mask_data_reg = 
le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); - i2c.en_data_mask = (1 << gpio->ucDataEnShift); - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); - - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) - i2c.hw_capable = true; - else - i2c.hw_capable = false; - - if (gpio->sucI2cId.ucAccess == 0xa0) - i2c.mm_i2c = true; - else - i2c.mm_i2c = false; + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); - i2c.i2c_id = gpio->sucI2cId.ucAccess; - - if (i2c.mask_clk_reg) { - i2c.valid = true; + if (i2c.valid) { sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } @@ -1996,10 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) return state_index; /* last mode is usually default, array is low to high */ for (i = 0; i < num_modes; i++) { + rdev->pm.power_state[state_index].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + if (!rdev->pm.power_state[state_index].clock_info) + return state_index; + rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; switch (frev) { case 1: - rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = @@ -2035,7 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) state_index++; break; case 2: - rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = @@ -2072,7 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) state_index++; break; case 3: - rdev->pm.power_state[state_index].num_clock_modes = 1; rdev->pm.power_state[state_index].clock_info[0].mclk = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); rdev->pm.power_state[state_index].clock_info[0].sclk = @@ -2257,7 +2241,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; - if (ASIC_IS_DCE5(rdev)) { + if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { /* NI chips post without MC ucode, so default clocks are strobe mode only */ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; @@ -2377,17 +2361,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + 
(power_state->v1.ucNonClockStateIndex * power_info->pplib.ucNonClockSize)); - for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { - clock_info = (union pplib_clock_info *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + - (power_state->v1.ucClockStateIndices[j] * - power_info->pplib.ucClockInfoSize)); - valid = radeon_atombios_parse_pplib_clock_info(rdev, - state_index, mode_index, - clock_info); - if (valid) - mode_index++; + rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * + ((power_info->pplib.ucStateEntrySize - 1) ? + (power_info->pplib.ucStateEntrySize - 1) : 1), + GFP_KERNEL); + if (!rdev->pm.power_state[i].clock_info) + return state_index; + if (power_info->pplib.ucStateEntrySize - 1) { + for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { + clock_info = (union pplib_clock_info *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + + (power_state->v1.ucClockStateIndices[j] * + power_info->pplib.ucClockInfoSize)); + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + } else { + rdev->pm.power_state[state_index].clock_info[0].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = + rdev->clock.default_sclk; + mode_index++; } rdev->pm.power_state[state_index].num_clock_modes = mode_index; if (mode_index) { @@ -2456,18 +2454,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = power_state->v2.clockInfoIndex[j]; - /* XXX this might be an inagua bug... */ - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index]; - valid = radeon_atombios_parse_pplib_clock_info(rdev, - state_index, mode_index, - clock_info); - if (valid) - mode_index++; + rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * + (power_state->v2.ucNumDPMLevels ? + power_state->v2.ucNumDPMLevels : 1), + GFP_KERNEL); + if (!rdev->pm.power_state[i].clock_info) + return state_index; + if (power_state->v2.ucNumDPMLevels) { + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + /* XXX this might be an inagua bug... 
*/ + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index]; + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + } else { + rdev->pm.power_state[state_index].clock_info[0].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = + rdev->clock.default_sclk; + mode_index++; } rdev->pm.power_state[state_index].num_clock_modes = mode_index; if (mode_index) { @@ -2524,19 +2536,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) } else { rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { - /* add the default mode */ - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; - rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - rdev->pm.power_state[state_index].pcie_lanes = 16; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].flags = 0; - state_index++; + rdev->pm.power_state[0].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + if (rdev->pm.power_state[0].clock_info) { + /* add the default mode */ + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + rdev->pm.power_state[state_index].pcie_lanes = 16; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].flags = 0; + state_index++; + } } } diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 5cafc90de7f8..17e1a9b2d8fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, struct radeon_bo *sobj = NULL; uint64_t saddr, daddr; int r, n; - unsigned int time; + int time; n = RADEON_BENCHMARK_ITERATIONS; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 8bf83c4b4147..81fc100be7e1 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) /* allocate 2 power states */ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); - if (!rdev->pm.power_state) { - rdev->pm.default_power_state_index = state_index; - rdev->pm.num_power_states = 0; - - rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; - rdev->pm.current_clock_mode_index = 0; - return; - } + if (rdev->pm.power_state) { + /* allocate 1 clock 
mode per state */ + rdev->pm.power_state[0].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + rdev->pm.power_state[1].clock_info = + kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); + if (!rdev->pm.power_state[0].clock_info || + !rdev->pm.power_state[1].clock_info) + goto pm_failed; + } else + goto pm_failed; /* check for a thermal chip */ offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); @@ -2735,6 +2738,14 @@ default_mode: rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; + return; + +pm_failed: + rdev->pm.default_power_state_index = state_index; + rdev->pm.num_power_states = 0; + + rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; + rdev->pm.current_clock_mode_index = 0; } void radeon_external_tmds_setup(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index fae00c0d75aa..29afd71e0840 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) { struct drm_radeon_cs *cs = data; uint64_t *chunk_array_ptr; - unsigned size, i; + unsigned size, i, flags = 0; if (!cs->num_chunks) { return 0; @@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) if (p->chunks[i].length_dw == 0) return -EINVAL; } + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && + !p->chunks[i].length_dw) { + return -EINVAL; + } p->chunks[i].length_dw = user_chunk.length_dw; p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; @@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->chunks[i].user_ptr, size)) { return -EFAULT; } + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { + flags = p->chunks[i].kdata[0]; + } } else { p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); @@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->chunks[p->chunk_ib_idx].length_dw); return -EINVAL; } + + p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0; return 0; } @@ -222,7 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct radeon_cs_chunk *ib_chunk; int r; - mutex_lock(&rdev->cs_mutex); + radeon_mutex_lock(&rdev->cs_mutex); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; @@ -233,14 +242,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_get(rdev, &parser.ib); if (r) { DRM_ERROR("Failed to get ib !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_parser_relocs(&parser); @@ -248,7 +257,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } /* Copy the packet into the IB, the parser will read from the @@ -260,14 +269,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if 
(r || parser.parser_error) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_finish_pages(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_schedule(rdev, parser.ib); @@ -275,7 +284,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_ERROR("Failed to schedule IB !\n"); } radeon_cs_parser_fini(&parser, r); - mutex_unlock(&rdev->cs_mutex); + radeon_mutex_unlock(&rdev->cs_mutex); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c33bc914d93d..c4d00a171411 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev, /* mutex initialization are all done here so we * can recall function without having locking issues */ - mutex_init(&rdev->cs_mutex); + radeon_mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ib_pool.mutex); mutex_init(&rdev->cp.mutex); mutex_init(&rdev->dc_hw_i2c_mutex); @@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev) int r; int resched; + /* Prevent CS ioctl from interfering */ + radeon_mutex_lock(&rdev->cs_mutex); + radeon_save_bios_scratch_regs(rdev); /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); @@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev) radeon_restore_bios_scratch_regs(rdev); drm_helper_resume_force_mode(rdev->ddev); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); - return 0; } - /* bad news, how to tell it to userspace ? */ - dev_info(rdev->dev, "GPU reset failed\n"); + + radeon_mutex_unlock(&rdev->cs_mutex); + + if (r) { + /* bad news, how to tell it to userspace ? 
*/ + dev_info(rdev->dev, "GPU reset failed\n"); + } + return r; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a0b35e909489..71499fc3daf5 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -53,9 +53,10 @@ * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query * 2.10.0 - fusion 2D tiling * 2.11.0 - backend map, initial compute support for the CS checker + * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 11 +#define KMS_DRIVER_MINOR 12 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 06e413e6a920..4b27efa4405b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_TRAVIS: case ENCODER_OBJECT_ID_NUTMEG: - return true; + return radeon_encoder->encoder_id; default: - return false; + return ENCODER_OBJECT_ID_NONE; } } - - return false; + return ENCODER_OBJECT_ID_NONE; } void radeon_panel_mode_fixup(struct drm_encoder *encoder, diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 41a5d48e657b..daadf2111040 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - - /* adjust pm to upcoming mode change */ - radeon_pm_compute_clocks(rdev); - if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; return true; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fabe89fa6a1..78a665bd9519 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev); #define ACPI_AC_CLASS "ac_adapter" +int radeon_pm_get_type_index(struct radeon_device *rdev, + enum radeon_pm_state_type ps_type, + int instance) +{ + int i; + int found_instance = -1; + + for (i = 0; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.power_state[i].type == ps_type) { + found_instance++; + if (found_instance == instance) + return i; + } + } + /* return default if no match */ + return rdev->pm.default_power_state_index; +} + #ifdef CONFIG_ACPI static int radeon_acpi_event(struct notifier_block *nb, unsigned long val, diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 481b99e89f65..b1053d640423 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= AVIVO_D1GRPH_UPDATE_LOCK; @@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go 
high. */ - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index a983f410ab89..23ae1c60ab3d 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + int i; /* Lock the graphics update lock */ tmp |= AVIVO_D1GRPH_UPDATE_LOCK; @@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) (u32)crtc_base); /* Wait for update_pending to go high. */ - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) + break; + udelay(1); + } DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 617b64678fc6..0bb0f5f713e6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -574,10 +574,16 @@ retry: return ret; spin_lock(&glob->lru_lock); + + if (unlikely(list_empty(&bo->ddestroy))) { + spin_unlock(&glob->lru_lock); + return 0; + } + ret = ttm_bo_reserve_locked(bo, interruptible, no_wait_reserve, false, 0); - if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { + if (unlikely(ret != 0)) { spin_unlock(&glob->lru_lock); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 3f6343502d1f..5ff561d4e0b4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data, goto out_clips; } - clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); + clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; @@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data, goto out_clips; } - clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); + clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (clips == NULL) { DRM_ERROR("Failed to allocate clip rect list.\n"); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 03daefa73397..37d40545ed77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, struct vmw_dma_buffer *dmabuf = NULL; int ret; + /* A lot of the code assumes this */ + if (handle && (width != 64 || height != 64)) + return -EINVAL; + if (handle) { ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, &surface); @@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv, top = clips->y1; bottom = clips->y2; - clips_ptr = 
clips; - for (i = 1; i < num_clips; i++, clips_ptr += inc) { + /* skip the first clip rect */ + for (i = 1, clips_ptr = clips + inc; + i < num_clips; i++, clips_ptr += inc) { left = min_t(int, left, (int)clips_ptr->x1); right = max_t(int, right, (int)clips_ptr->x2); top = min_t(int, top, (int)clips_ptr->y1); @@ -1323,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv) * drm_encoder_cleanup which takes the lock we deadlock. */ drm_mode_config_cleanup(dev_priv->dev); - vmw_kms_close_legacy_display_system(dev_priv); + if (dev_priv->sou_priv) + vmw_kms_close_screen_object_display(dev_priv); + else + vmw_kms_close_legacy_display_system(dev_priv); return 0; } @@ -1801,7 +1809,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); - rects = kzalloc(rects_size, GFP_KERNEL); + rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), + GFP_KERNEL); if (unlikely(!rects)) { ret = -ENOMEM; goto out_unlock; @@ -1816,10 +1825,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, } for (i = 0; i < arg->num_outputs; ++i) { - if (rects->x < 0 || - rects->y < 0 || - rects->x + rects->w > mode_config->max_width || - rects->y + rects->h > mode_config->max_height) { + if (rects[i].x < 0 || + rects[i].y < 0 || + rects[i].x + rects[i].w > mode_config->max_width || + rects[i].y + rects[i].h > mode_config->max_height) { DRM_ERROR("Invalid GUI layout.\n"); ret = -EINVAL; goto out_free; diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index c72f1c0b5e63..111d956d8e7d 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) while (new_bus) { new_bridge = new_bus->self; - if (new_bridge) { - /* go through list of devices already registered */ - list_for_each_entry(same_bridge_vgadev, &vga_list, list) { - bus = same_bridge_vgadev->pdev->bus; - bridge = bus->self; - - /* see if the share a bridge with this device */ - if (new_bridge == bridge) { - /* if their direct parent bridge is the same - as any bridge of this device then it can't be used - for that device */ - same_bridge_vgadev->bridge_has_one_vga = false; - } + /* go through list of devices already registered */ + list_for_each_entry(same_bridge_vgadev, &vga_list, list) { + bus = same_bridge_vgadev->pdev->bus; + bridge = bus->self; + + /* see if the share a bridge with this device */ + if (new_bridge == bridge) { + /* if their direct parent bridge is the same + as any bridge of this device then it can't be used + for that device */ + same_bridge_vgadev->bridge_has_one_vga = false; + } - /* now iterate the previous devices bridge hierarchy */ - /* if the new devices parent bridge is in the other devices - hierarchy then we can't use it to control this device */ - while (bus) { - bridge = bus->self; - if (bridge) { - if (bridge == vgadev->pdev->bus->self) - vgadev->bridge_has_one_vga = false; - } - bus = bus->parent; + /* now iterate the previous devices bridge hierarchy */ + /* if the new devices parent bridge is in the other devices + hierarchy then we can't use it to control this device */ + while (bus) { + bridge = bus->self; + if (bridge) { + if (bridge == vgadev->pdev->bus->self) + vgadev->bridge_has_one_vga = false; } + bus = bus->parent; } } new_bus = new_bus->parent; @@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf, uc = &priv->cards[i]; } - if (!uc) - return -EINVAL; + if 
(!uc) { + ret_val = -EINVAL; + goto done; + } - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) - return -EINVAL; + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { + ret_val = -EINVAL; + goto done; + } - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) - return -EINVAL; + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { + ret_val = -EINVAL; + goto done; + } vga_put(pdev, io_state); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 848a56c0279c..af353842f75f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 06ce996b8b65..4a441a6f9967 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -266,7 +266,7 @@ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc -#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003 #define USB_VENDOR_ID_GLAB 0x06c2 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 9ec854ae118b..91be41f60809 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -315,7 +315,7 @@ config SENSORS_DS1621 config SENSORS_EXYNOS4_TMU tristate "Temperature sensor on Samsung EXYNOS4" - depends on EXYNOS4_DEV_TMU + depends on ARCH_EXYNOS4 help If you say yes here you get support for TMU (Thermal Managment Unit) on SAMSUNG EXYNOS4 series of SoC. 
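The vga_arb_write() hunk above converts its early "return -EINVAL" statements into "ret_val = -EINVAL; goto done;", so every failure path now leaves through the function's shared done label and its cleanup instead of bailing out directly. A minimal sketch of that single-exit pattern follows; example_parse() and all of its locals are invented for illustration and are not code from vgaarb.c.

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Illustrative sketch of the single-exit error style used by the
 * vga_arb_write() change above; every name here is made up. */
static ssize_t example_parse(const char __user *ubuf, size_t count)
{
	char *kbuf;
	ssize_t ret_val;

	kbuf = kzalloc(count + 1, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;		/* nothing to undo yet, a bare return is still fine */

	if (copy_from_user(kbuf, ubuf, count)) {
		ret_val = -EFAULT;	/* instead of "return -EFAULT;" ... */
		goto done;		/* ... so the kfree() below still runs */
	}

	if (kbuf[0] == '\0') {
		ret_val = -EINVAL;	/* validation failures take the same route */
		goto done;
	}

	ret_val = count;
done:
	kfree(kbuf);
	return ret_val;
}

Routing every failure through one label keeps the unwind code in a single place, which is why the hunk above can rewrite three separate early returns as goto done without duplicating any cleanup.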
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index 318e38e85376..5d760f3d21c2 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id); static struct spi_driver ad7314_driver = { .driver = { .name = "ad7314", - .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ad7314_probe, diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c index 52319340e182..04450f8bf5da 100644 --- a/drivers/hwmon/ads7871.c +++ b/drivers/hwmon/ads7871.c @@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi) static struct spi_driver ads7871_driver = { .driver = { .name = DEVICE_NAME, - .bus = &spi_bus_type, .owner = THIS_MODULE, }, diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c index faa0884f61f6..f2359a0093bd 100644 --- a/drivers/hwmon/exynos4_tmu.c +++ b/drivers/hwmon/exynos4_tmu.c @@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = { .resume = exynos4_tmu_resume, }; -static int __init exynos4_tmu_driver_init(void) -{ - return platform_driver_register(&exynos4_tmu_driver); -} -module_init(exynos4_tmu_driver_init); - -static void __exit exynos4_tmu_driver_exit(void) -{ - platform_driver_unregister(&exynos4_tmu_driver); -} -module_exit(exynos4_tmu_driver_exit); +module_platform_driver(exynos4_tmu_driver); MODULE_DESCRIPTION("EXYNOS4 TMU Driver"); MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>"); diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index 89aa9fb743af..9ba38f318ffb 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = { }, }; -static int __init gpio_fan_init(void) -{ - return platform_driver_register(&gpio_fan_driver); -} - -static void __exit gpio_fan_exit(void) -{ - platform_driver_unregister(&gpio_fan_driver); -} - -module_init(gpio_fan_init); -module_exit(gpio_fan_exit); +module_platform_driver(gpio_fan_driver); MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); MODULE_DESCRIPTION("GPIO FAN driver"); diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c index fea292d43407..7a48b1eb4233 100644 --- a/drivers/hwmon/jz4740-hwmon.c +++ b/drivers/hwmon/jz4740-hwmon.c @@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = { }, }; -static int __init jz4740_hwmon_init(void) -{ - return platform_driver_register(&jz4740_hwmon_driver); -} -module_init(jz4740_hwmon_init); - -static void __exit jz4740_hwmon_exit(void) -{ - platform_driver_unregister(&jz4740_hwmon_driver); -} -module_exit(jz4740_hwmon_exit); +module_platform_driver(jz4740_hwmon_driver); MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index eab11615dced..9b382ec2c3bd 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = { .id_table = ntc_thermistor_id, }; -static int __init ntc_thermistor_init(void) -{ - return platform_driver_register(&ntc_thermistor_driver); -} - -module_init(ntc_thermistor_init); - -static void __exit ntc_thermistor_cleanup(void) -{ - platform_driver_unregister(&ntc_thermistor_driver); -} - -module_exit(ntc_thermistor_cleanup); +module_platform_driver(ntc_thermistor_driver); MODULE_DESCRIPTION("NTC Thermistor Driver"); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); diff --git 
a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c index b39f52e2752a..f6c26d19f521 100644 --- a/drivers/hwmon/s3c-hwmon.c +++ b/drivers/hwmon/s3c-hwmon.c @@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = { .remove = __devexit_p(s3c_hwmon_remove), }; -static int __init s3c_hwmon_init(void) -{ - return platform_driver_register(&s3c_hwmon_driver); -} - -static void __exit s3c_hwmon_exit(void) -{ - platform_driver_unregister(&s3c_hwmon_driver); -} - -module_init(s3c_hwmon_init); -module_exit(s3c_hwmon_exit); +module_platform_driver(s3c_hwmon_driver); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("S3C ADC HWMon driver"); diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c index e3b5c6039c25..79b6dabe3161 100644 --- a/drivers/hwmon/sch5627.c +++ b/drivers/hwmon/sch5627.c @@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = { .remove = sch5627_remove, }; -static int __init sch5627_init(void) -{ - return platform_driver_register(&sch5627_driver); -} - -static void __exit sch5627_exit(void) -{ - platform_driver_unregister(&sch5627_driver); -} +module_platform_driver(sch5627_driver); MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL"); - -module_init(sch5627_init); -module_exit(sch5627_exit); diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c index 244407aa79fc..9d5236fb09b4 100644 --- a/drivers/hwmon/sch5636.c +++ b/drivers/hwmon/sch5636.c @@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = { .remove = sch5636_remove, }; -static int __init sch5636_init(void) -{ - return platform_driver_register(&sch5636_driver); -} - -static void __exit sch5636_exit(void) -{ - platform_driver_unregister(&sch5636_driver); -} +module_platform_driver(sch5636_driver); MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver"); MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_LICENSE("GPL"); - -module_init(sch5636_init); -module_exit(sch5636_exit); diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c index 57240740b161..0018c7dd0097 100644 --- a/drivers/hwmon/twl4030-madc-hwmon.c +++ b/drivers/hwmon/twl4030-madc-hwmon.c @@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = { }, }; -static int __init twl4030_madc_hwmon_init(void) -{ - return platform_driver_register(&twl4030_madc_hwmon_driver); -} - -module_init(twl4030_madc_hwmon_init); - -static void __exit twl4030_madc_hwmon_exit(void) -{ - platform_driver_unregister(&twl4030_madc_hwmon_driver); -} - -module_exit(twl4030_madc_hwmon_exit); +module_platform_driver(twl4030_madc_hwmon_driver); MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c index 3cd07bf42dca..b9a87e89bab4 100644 --- a/drivers/hwmon/ultra45_env.c +++ b/drivers/hwmon/ultra45_env.c @@ -309,15 +309,4 @@ static struct platform_driver env_driver = { .remove = __devexit_p(env_remove), }; -static int __init env_init(void) -{ - return platform_driver_register(&env_driver); -} - -static void __exit env_exit(void) -{ - platform_driver_unregister(&env_driver); -} - -module_init(env_init); -module_exit(env_exit); +module_platform_driver(env_driver); diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c index 97b1f834a471..9b598ed26020 100644 --- a/drivers/hwmon/wm831x-hwmon.c +++ b/drivers/hwmon/wm831x-hwmon.c @@ -209,17 +209,7 @@ static struct 
platform_driver wm831x_hwmon_driver = { }, }; -static int __init wm831x_hwmon_init(void) -{ - return platform_driver_register(&wm831x_hwmon_driver); -} -module_init(wm831x_hwmon_init); - -static void __exit wm831x_hwmon_exit(void) -{ - platform_driver_unregister(&wm831x_hwmon_driver); -} -module_exit(wm831x_hwmon_exit); +module_platform_driver(wm831x_hwmon_driver); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM831x Hardware Monitoring"); diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c index 13290595ca86..3ff67edbdc44 100644 --- a/drivers/hwmon/wm8350-hwmon.c +++ b/drivers/hwmon/wm8350-hwmon.c @@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = { }, }; -static int __init wm8350_hwmon_init(void) -{ - return platform_driver_register(&wm8350_hwmon_driver); -} -module_init(wm8350_hwmon_init); - -static void __exit wm8350_hwmon_exit(void) -{ - platform_driver_unregister(&wm8350_hwmon_driver); -} -module_exit(wm8350_hwmon_exit); +module_platform_driver(wm8350_hwmon_driver); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM8350 Hardware Monitoring"); diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c index 143461a95ae4..86980fe04117 100644 --- a/drivers/hwspinlock/u8500_hsem.c +++ b/drivers/hwspinlock/u8500_hsem.c @@ -21,6 +21,7 @@ * General Public License for more details. */ +#include <linux/module.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/pm_runtime.h> @@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev) return -ENODEV; io_base = ioremap(res->start, resource_size(res)); - if (!io_base) { - ret = -ENOMEM; - goto free_state; - } + if (!io_base) + return -ENOMEM; /* make sure protocol 1 is selected */ val = readl(io_base + HSEM_CTRL_REG); diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index 85584a547c25..525c7345fa0b 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -488,7 +488,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) if (flags & I2C_M_TEN) { /* a ten bit address */ - addr = 0xf0 | ((msg->addr >> 7) & 0x03); + addr = 0xf0 | ((msg->addr >> 7) & 0x06); bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr); /* try extended address code...*/ ret = try_address(i2c_adap, addr, retries); @@ -498,7 +498,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) return -ENXIO; } /* the remaining 8 bit address */ - ret = i2c_outb(i2c_adap, msg->addr & 0x7f); + ret = i2c_outb(i2c_adap, msg->addr & 0xff); if ((ret != 1) && !nak_ok) { /* the chip did not ack / xmission error occurred */ dev_err(&i2c_adap->dev, "died at 2nd address code\n"); diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c index 835e47b39bc2..03b615778887 100644 --- a/drivers/i2c/busses/i2c-nuc900.c +++ b/drivers/i2c/busses/i2c-nuc900.c @@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev) i2c->adap.algo_data = i2c; i2c->adap.dev.parent = &pdev->dev; - mfp_set_groupg(&pdev->dev); + mfp_set_groupg(&pdev->dev, NULL); clk_get_rate(i2c->clk); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 131079a3e292..1e5606185b4f 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -539,8 +539,10 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) client->dev.type = &i2c_client_type; 
client->dev.of_node = info->of_node; + /* For 10-bit clients, add an arbitrary offset to avoid collisions */ dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap), - client->addr); + client->addr | ((client->flags & I2C_CLIENT_TEN) + ? 0xa000 : 0)); status = device_register(&client->dev); if (status) goto out_err; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index c90ce50b619f..57a45ce84b2d 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -579,7 +579,7 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy) return 0; } -int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, +static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c index 67cbcfa35122..847553fd8b96 100644 --- a/drivers/ide/cy82c693.c +++ b/drivers/ide/cy82c693.c @@ -1,7 +1,7 @@ /* * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator - * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz * * CYPRESS CY82C693 chipset IDE controller * @@ -90,7 +90,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) u8 time_16, time_8; /* select primary or secondary channel */ - if (hwif->index > 0) { /* drive is on the secondary channel */ + if (drive->dn > 1) { /* drive is on the secondary channel */ dev = pci_get_slot(dev->bus, dev->devfn+1); if (!dev) { printk(KERN_ERR "%s: tune_drive: " @@ -141,7 +141,7 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); } - if (hwif->index > 0) + if (drive->dn > 1) pci_dev_put(dev); } diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 4a697a238e28..8716066a2f2b 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -521,8 +521,8 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { d.init_dma = icside_dma_init; d.port_ops = &icside_v6_port_ops; + } else d.dma_ops = NULL; - } ret = ide_host_register(host, &d, hws); if (ret) diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 04b09564bfa9..8126824daccb 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -43,7 +43,6 @@ /* For SCSI -> ATAPI command conversion */ #include <scsi/scsi.h> -#include <linux/irq.h> #include <linux/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 61fdf544fbd6..3d42043fec51 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -35,7 +35,6 @@ #include <scsi/scsi_ioctl.h> #include <asm/byteorder.h> -#include <linux/irq.h> #include <linux/uaccess.h> #include <linux/io.h> #include <asm/unaligned.h> diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 7ecb1ade8874..ce8237d36159 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -41,7 +41,6 @@ #include <scsi/scsi.h> #include <asm/byteorder.h> -#include <linux/irq.h> #include <linux/uaccess.h> #include <linux/io.h> #include <asm/unaligned.h> diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c index b59d04c72051..1892e81fb00f 100644 --- a/drivers/ide/piix.c +++ b/drivers/ide/piix.c @@ -331,7 +331,7 @@ static const struct ide_port_ops 
ich_port_ops = { .udma_mask = udma, \ } -#define DECLARE_ICH_DEV(udma) \ +#define DECLARE_ICH_DEV(mwdma, udma) \ { \ .name = DRV_NAME, \ .init_chipset = init_chipset_ich, \ @@ -340,7 +340,7 @@ static const struct ide_port_ops ich_port_ops = { .port_ops = &ich_port_ops, \ .pio_mask = ATA_PIO4, \ .swdma_mask = ATA_SWDMA2_ONLY, \ - .mwdma_mask = ATA_MWDMA12_ONLY, \ + .mwdma_mask = mwdma, \ .udma_mask = udma, \ } @@ -362,13 +362,15 @@ static const struct ide_port_info piix_pci_info[] __devinitdata = { /* 2: PIIX4 */ DECLARE_PIIX_DEV(ATA_UDMA2), /* 3: ICH0 */ - DECLARE_ICH_DEV(ATA_UDMA2), + DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2), /* 4: ICH */ - DECLARE_ICH_DEV(ATA_UDMA4), + DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4), /* 5: PIIX4 */ DECLARE_PIIX_DEV(ATA_UDMA4), - /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */ - DECLARE_ICH_DEV(ATA_UDMA5), + /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */ + DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5), + /* 7: ICH7/7-R, no MWDMA1 */ + DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5), }; /** @@ -438,9 +440,9 @@ static const struct pci_device_id piix_pci_tbl[] = { #endif { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 }, - { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 }, { 0, }, }; diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c index e53a1b78378b..281c91426345 100644 --- a/drivers/ide/triflex.c +++ b/drivers/ide/triflex.c @@ -113,12 +113,26 @@ static const struct pci_device_id triflex_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, triflex_pci_tbl); +#ifdef CONFIG_PM +static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state) +{ + /* + * We must not disable or powerdown the device. + * APM bios refuses to suspend if IDE is not accessible. 
+ */ + pci_save_state(dev); + return 0; +} +#else +#define triflex_ide_pci_suspend NULL +#endif + static struct pci_driver triflex_pci_driver = { .name = "TRIFLEX_IDE", .id_table = triflex_pci_tbl, .probe = triflex_init_one, .remove = ide_pci_remove, - .suspend = ide_pci_suspend, + .suspend = triflex_ide_pci_suspend, .resume = ide_pci_resume, }; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 691276bafd78..e9cf51b1343b 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in, neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); if (!neigh || !(neigh->nud_state & NUD_VALID)) { + rcu_read_lock(); neigh_event_send(dst_get_neighbour(&rt->dst), NULL); + rcu_read_unlock(); ret = -ENODATA; if (neigh) goto release; @@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, goto put; } + rcu_read_lock(); neigh = dst_get_neighbour(dst); if (!neigh || !(neigh->nud_state & NUD_VALID)) { if (neigh) neigh_event_send(neigh, NULL); ret = -ENODATA; - goto put; + } else { + ret = rdma_copy_addr(addr, dst->dev, neigh->ha); } - - ret = rdma_copy_addr(addr, dst->dev, neigh->ha); + rcu_read_unlock(); put: dst_release(dst); return ret; diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index de6d0774e609..c88b12beef25 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto reject; } dst = &rt->dst; + rcu_read_lock(); neigh = dst_get_neighbour(dst); l2t = t3_l2t_get(tdev, neigh, neigh->dev); + rcu_read_unlock(); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } ep->dst = &rt->dst; + rcu_read_lock(); neigh = dst_get_neighbour(ep->dst); /* get a l2t entry */ ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); + rcu_read_unlock(); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b36cdac9c558..0747004313ad 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, (mpa_rev_to_use == 2 ? 
MPA_ENHANCED_RDMA_CONN : 0); mpa->private_data_size = htons(ep->plen); mpa->revision = mpa_rev_to_use; - if (mpa_rev_to_use == 1) + if (mpa_rev_to_use == 1) { ep->tried_with_mpa_v1 = 1; + ep->retry_with_mpa_v1 = 0; + } if (mpa_rev_to_use == 2) { mpa->private_data_size += @@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) goto reject; } dst = &rt->dst; + rcu_read_lock(); neigh = dst_get_neighbour(dst); if (neigh->dev->flags & IFF_LOOPBACK) { pdev = ip_dev_find(&init_net, peer_ip); @@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) rss_qid = dev->rdev.lldi.rxq_ids[ cxgb4_port_idx(neigh->dev) * step]; } + rcu_read_unlock(); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) } ep->dst = &rt->dst; + rcu_read_lock(); neigh = dst_get_neighbour(ep->dst); /* get a l2t entry */ @@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ cxgb4_port_idx(neigh->dev) * step]; } + rcu_read_unlock(); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; @@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } ep->dst = &rt->dst; + rcu_read_lock(); neigh = dst_get_neighbour(ep->dst); /* get a l2t entry */ @@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ep->retry_with_mpa_v1 = 0; ep->tried_with_mpa_v1 = 0; } + rcu_read_unlock(); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index f35a935267e7..0f1607c8325a 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) while (ptr != cq->sw_pidx) { cqe = &cq->sw_queue[ptr]; if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) && - (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq)) + (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) (*count)++; if (++ptr == cq->size) ptr = 0; diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index dfce9ea98a39..0a52d72371ee 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi neigh_release(neigh); } - if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) + if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) { + rcu_read_lock(); neigh_event_send(dst_get_neighbour(&rt->dst), NULL); - + rcu_read_unlock(); + } ip_rt_put(rt); return rc; } diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5bd2162b95dc..1d5895941e19 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) SYM_LSB(IBCCtrlA_0, MaxPktLen); ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */ - /* initially come up waiting for TS1, without sending anything. 
*/ - val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << - QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); - - ppd->cpspec->ibcctrl_a = val; /* * Reset the PCS interface to the serdes (and also ibc, which is still * in reset from above). Writes new value of ibcctrl_a as last step. */ qib_7322_mini_pcs_reset(ppd); - qib_write_kreg(dd, kr_scratch, 0ULL); - /* clear the linkinit cmds */ - ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); if (!ppd->cpspec->ibcctrl_b) { unsigned lse = ppd->link_speed_enabled; @@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd) ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn); set_vls(ppd); + /* initially come up DISABLED, without sending anything. */ + val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE << + QLOGIC_IB_IBCC_LINKINITCMD_SHIFT); + qib_write_kreg_port(ppd, krp_ibcctrl_a, val); + qib_write_kreg(dd, kr_scratch, 0ULL); + /* clear the linkinit cmds */ + ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd); + /* be paranoid against later code motion, etc. */ spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags); ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable); @@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) off */ if (ppd->dd->flags & QIB_HAS_QSFP) { qd->t_insert = get_jiffies_64(); - schedule_work(&qd->work); + queue_work(ib_wq, &qd->work); } spin_lock_irqsave(&ppd->sdma_lock, flags); if (__qib_sdma_running(ppd)) diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index e06c4ed383f1..fa71b1e666c5 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c @@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, udelay(20); /* Generous RST dwell */ dd->f_gpio_mod(dd, mask, mask, mask); - /* Spec says module can take up to two seconds! */ - mask = QSFP_GPIO_MOD_PRS_N; - if (qd->ppd->hw_pidx) - mask <<= QSFP_GPIO_PORT2_SHIFT; - - /* Do not try to wait here. Better to let event handle it */ - if (!qib_qsfp_mod_present(qd->ppd)) - goto bail; - /* We see a module, but it may be unwise to look yet. 
Just schedule */ - qd->t_insert = get_jiffies_64(); - queue_work(ib_wq, &qd->work); -bail: return; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0ef9af94997d..4115be54ba3b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev, struct ib_pd *pd, struct ib_ah_attr *attr) { struct ipoib_ah *ah; + struct ib_ah *vah; ah = kmalloc(sizeof *ah, GFP_KERNEL); if (!ah) - return NULL; + return ERR_PTR(-ENOMEM); ah->dev = dev; ah->last_send = 0; kref_init(&ah->ref); - ah->ah = ib_create_ah(pd, attr); - if (IS_ERR(ah->ah)) { + vah = ib_create_ah(pd, attr); + if (IS_ERR(vah)) { kfree(ah); - ah = NULL; - } else + ah = (struct ipoib_ah *)vah; + } else { + ah->ah = vah; ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); + } return ah; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 7567b6000230..83695b48b010 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -432,7 +432,7 @@ static void path_rec_completion(int status, spin_lock_irqsave(&priv->lock, flags); - if (ah) { + if (!IS_ERR_OR_NULL(ah)) { path->pathrec = *pathrec; old_ah = path->ah; @@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev, return 0; } +/* called with rcu_read_lock */ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -636,6 +637,7 @@ err_drop: spin_unlock_irqrestore(&priv->lock, flags); } +/* called with rcu_read_lock */ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(skb->dev); @@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) struct neighbour *n = NULL; unsigned long flags; + rcu_read_lock(); if (likely(skb_dst(skb))) n = dst_get_neighbour(skb_dst(skb)); if (likely(n)) { if (unlikely(!*to_ipoib_neigh(n))) { ipoib_path_lookup(skb, dev); - return NETDEV_TX_OK; + goto unlock; } neigh = *to_ipoib_neigh(n); @@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ipoib_neigh_free(dev, neigh); spin_unlock_irqrestore(&priv->lock, flags); ipoib_path_lookup(skb, dev); - return NETDEV_TX_OK; + goto unlock; } if (ipoib_cm_get(neigh)) { if (ipoib_cm_up(neigh)) { ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); - return NETDEV_TX_OK; + goto unlock; } } else if (neigh->ah) { ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); - return NETDEV_TX_OK; + goto unlock; } if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { @@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) phdr->hwaddr + 4); dev_kfree_skb_any(skb); ++dev->stats.tx_dropped; - return NETDEV_TX_OK; + goto unlock; } unicast_arp_send(skb, dev, phdr); } } - +unlock: + rcu_read_unlock(); return NETDEV_TX_OK; } @@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb, dst = skb_dst(skb); n = NULL; if (dst) - n = dst_get_neighbour(dst); + n = dst_get_neighbour_raw(dst); if ((!dst || !n) && daddr) { struct ipoib_pseudoheader *phdr = (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 1b7a97686356..873bff97e69e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ 
b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, av.grh.dgid = mcast->mcmember.mgid; ah = ipoib_create_ah(dev, priv->pd, &av); - if (!ah) { - ipoib_warn(priv, "ib_address_create failed\n"); + if (IS_ERR(ah)) { + ipoib_warn(priv, "ib_address_create failed %ld\n", + -PTR_ERR(ah)); + /* use original error */ + return PTR_ERR(ah); } else { spin_lock_irq(&priv->lock); mcast->ah = ah; @@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, skb->dev = dev; if (dst) - n = dst_get_neighbour(dst); + n = dst_get_neighbour_raw(dst); if (!dst || !n) { /* put pseudoheader back on for next time */ skb_push(skb, sizeof (struct ipoib_pseudoheader)); @@ -722,6 +725,8 @@ out: if (mcast && mcast->ah) { struct dst_entry *dst = skb_dst(skb); struct neighbour *n = NULL; + + rcu_read_lock(); if (dst) n = dst_get_neighbour(dst); if (n && !*to_ipoib_neigh(n)) { @@ -734,7 +739,7 @@ out: list_add_tail(&neigh->list, &mcast->neigh_list); } } - + rcu_read_unlock(); spin_unlock_irqrestore(&priv->lock, flags); ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); return; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 09b93b11a274..e2a9867c19d5 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1210,18 +1210,28 @@ static int elantech_reconnect(struct psmouse *psmouse) */ static int elantech_set_properties(struct elantech_data *etd) { + /* This represents the version of IC body. */ int ver = (etd->fw_version & 0x0f0000) >> 16; + /* Early version of Elan touchpads doesn't obey the rule. */ if (etd->fw_version < 0x020030 || etd->fw_version == 0x020600) etd->hw_version = 1; - else if (etd->fw_version < 0x150600) - etd->hw_version = 2; - else if (ver == 5) - etd->hw_version = 3; - else if (ver == 6) - etd->hw_version = 4; - else - return -1; + else { + switch (ver) { + case 2: + case 4: + etd->hw_version = 2; + break; + case 5: + etd->hw_version = 3; + break; + case 6: + etd->hw_version = 4; + break; + default: + return -1; + } + } /* * Turn on packet checking by default. 
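The ipoib hunks above change ipoib_create_ah() to return ERR_PTR(-ENOMEM) rather than NULL and switch the callers to IS_ERR()/IS_ERR_OR_NULL(), so the original errno value survives to the call site (ipoib_mcast_join_finish() now prints and returns it). A simplified user-space sketch of the error-pointer idiom this relies on; the real helpers live in include/linux/err.h, and the versions below are reimplemented only for illustration:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095  /* errno values are encoded in the topmost addresses */

static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Toy stand-in for ipoib_create_ah(): on failure, return the reason. */
static void *create_ah(int simulate_oom)
{
        static int dummy_ah;

        if (simulate_oom)
                return ERR_PTR(-ENOMEM);
        return &dummy_ah;
}

int main(void)
{
        void *ah = create_ah(1);

        if (IS_ERR(ah))
                printf("create_ah failed: %ld\n", PTR_ERR(ah));  /* -12 */

        ah = create_ah(0);
        if (!IS_ERR(ah))
                printf("create_ah ok: %p\n", ah);
        return 0;
}

The idiom works because no valid kernel pointer falls within the last MAX_ERRNO bytes of the address space, so a value in that range can be decoded back into a negative errno instead of being dereferenced.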
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c index 4b2a42f9f0bb..d4d08bd9205b 100644 --- a/drivers/input/serio/ams_delta_serio.c +++ b/drivers/input/serio/ams_delta_serio.c @@ -24,6 +24,7 @@ #include <linux/irq.h> #include <linux/serio.h> #include <linux/slab.h> +#include <linux/module.h> #include <asm/mach-types.h> #include <plat/board-ams-delta.h> diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index bb9f5d31f0d0..b4cfc6c8be89 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -431,6 +431,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), }, }, + { + /* Newer HP Pavilion dv4 models */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + }, + }, { } }; @@ -560,6 +567,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), }, }, + { + /* Newer HP Pavilion dv4 models */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), + }, + }, { } }; diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 5414253b185a..6bea6962f8ee 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -34,7 +34,9 @@ config AMD_IOMMU bool "AMD IOMMU support" select SWIOTLB select PCI_MSI - select PCI_IOV + select PCI_ATS + select PCI_PRI + select PCI_PASID select IOMMU_API depends on X86_64 && PCI && ACPI ---help--- @@ -58,6 +60,15 @@ config AMD_IOMMU_STATS information to userspace via debugfs. If unsure, say N. +config AMD_IOMMU_V2 + tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)" + depends on AMD_IOMMU && PROFILING && EXPERIMENTAL + select MMU_NOTIFIER + ---help--- + This option enables support for the AMD IOMMUv2 features of the IOMMU + hardware. Select this option if you want to use devices that support + the the PCI PRI and PASID interface. 
+ # Intel IOMMU support config DMAR_TABLE bool diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 2f4448794bc7..0e36b4934aff 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o +obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 341573821864..a7cbcd46af9e 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -17,6 +17,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/ratelimit.h> #include <linux/pci.h> #include <linux/pci-ats.h> #include <linux/bitmap.h> @@ -28,6 +29,8 @@ #include <linux/iommu.h> #include <linux/delay.h> #include <linux/amd-iommu.h> +#include <linux/notifier.h> +#include <linux/export.h> #include <asm/msidef.h> #include <asm/proto.h> #include <asm/iommu.h> @@ -77,6 +80,9 @@ static struct protection_domain *pt_domain; static struct iommu_ops amd_iommu_ops; +static ATOMIC_NOTIFIER_HEAD(ppr_notifier); +int amd_iommu_max_glx_val = -1; + /* * general struct to manage commands send to an IOMMU */ @@ -85,6 +91,7 @@ struct iommu_cmd { }; static void update_domain(struct protection_domain *domain); +static int __init alloc_passthrough_domain(void); /**************************************************************************** * @@ -165,6 +172,33 @@ static struct iommu_dev_data *get_dev_data(struct device *dev) return dev->archdata.iommu; } +static bool pci_iommuv2_capable(struct pci_dev *pdev) +{ + static const int caps[] = { + PCI_EXT_CAP_ID_ATS, + PCI_PRI_CAP, + PCI_PASID_CAP, + }; + int i, pos; + + for (i = 0; i < 3; ++i) { + pos = pci_find_ext_capability(pdev, caps[i]); + if (pos == 0) + return false; + } + + return true; +} + +static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum) +{ + struct iommu_dev_data *dev_data; + + dev_data = get_dev_data(&pdev->dev); + + return dev_data->errata & (1 << erratum) ? 
true : false; +} + /* * In this function the list of preallocated protection domains is traversed to * find the domain for a specific device @@ -222,6 +256,7 @@ static bool check_device(struct device *dev) static int iommu_init_device(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct iommu_dev_data *dev_data; u16 alias; @@ -246,6 +281,13 @@ static int iommu_init_device(struct device *dev) dev_data->alias_data = alias_data; } + if (pci_iommuv2_capable(pdev)) { + struct amd_iommu *iommu; + + iommu = amd_iommu_rlookup_table[dev_data->devid]; + dev_data->iommu_v2 = iommu->is_iommu_v2; + } + dev->archdata.iommu = dev_data; return 0; @@ -335,6 +377,11 @@ DECLARE_STATS_COUNTER(domain_flush_single); DECLARE_STATS_COUNTER(domain_flush_all); DECLARE_STATS_COUNTER(alloced_io_mem); DECLARE_STATS_COUNTER(total_map_requests); +DECLARE_STATS_COUNTER(complete_ppr); +DECLARE_STATS_COUNTER(invalidate_iotlb); +DECLARE_STATS_COUNTER(invalidate_iotlb_all); +DECLARE_STATS_COUNTER(pri_requests); + static struct dentry *stats_dir; static struct dentry *de_fflush; @@ -369,6 +416,10 @@ static void amd_iommu_stats_init(void) amd_iommu_stats_add(&domain_flush_all); amd_iommu_stats_add(&alloced_io_mem); amd_iommu_stats_add(&total_map_requests); + amd_iommu_stats_add(&complete_ppr); + amd_iommu_stats_add(&invalidate_iotlb); + amd_iommu_stats_add(&invalidate_iotlb_all); + amd_iommu_stats_add(&pri_requests); } #endif @@ -383,8 +434,8 @@ static void dump_dte_entry(u16 devid) { int i; - for (i = 0; i < 8; ++i) - pr_err("AMD-Vi: DTE[%d]: %08x\n", i, + for (i = 0; i < 4; ++i) + pr_err("AMD-Vi: DTE[%d]: %016llx\n", i, amd_iommu_dev_table[devid].data[i]); } @@ -479,12 +530,84 @@ static void iommu_poll_events(struct amd_iommu *iommu) spin_unlock_irqrestore(&iommu->lock, flags); } +static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) +{ + struct amd_iommu_fault fault; + volatile u64 *raw; + int i; + + INC_STATS_COUNTER(pri_requests); + + raw = (u64 *)(iommu->ppr_log + head); + + /* + * Hardware bug: Interrupt may arrive before the entry is written to + * memory. If this happens we need to wait for the entry to arrive. + */ + for (i = 0; i < LOOP_TIMEOUT; ++i) { + if (PPR_REQ_TYPE(raw[0]) != 0) + break; + udelay(1); + } + + if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { + pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); + return; + } + + fault.address = raw[1]; + fault.pasid = PPR_PASID(raw[0]); + fault.device_id = PPR_DEVID(raw[0]); + fault.tag = PPR_TAG(raw[0]); + fault.flags = PPR_FLAGS(raw[0]); + + /* + * To detect the hardware bug we need to clear the entry + * to back to zero. 
+ */ + raw[0] = raw[1] = 0; + + atomic_notifier_call_chain(&ppr_notifier, 0, &fault); +} + +static void iommu_poll_ppr_log(struct amd_iommu *iommu) +{ + unsigned long flags; + u32 head, tail; + + if (iommu->ppr_log == NULL) + return; + + spin_lock_irqsave(&iommu->lock, flags); + + head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + + while (head != tail) { + + /* Handle PPR entry */ + iommu_handle_ppr_entry(iommu, head); + + /* Update and refresh ring-buffer state*/ + head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; + writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + } + + /* enable ppr interrupts again */ + writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); + + spin_unlock_irqrestore(&iommu->lock, flags); +} + irqreturn_t amd_iommu_int_thread(int irq, void *data) { struct amd_iommu *iommu; - for_each_iommu(iommu) + for_each_iommu(iommu) { iommu_poll_events(iommu); + iommu_poll_ppr_log(iommu); + } return IRQ_HANDLED; } @@ -613,6 +736,60 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; } +static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid, + u64 address, bool size) +{ + memset(cmd, 0, sizeof(*cmd)); + + address &= ~(0xfffULL); + + cmd->data[0] = pasid & PASID_MASK; + cmd->data[1] = domid; + cmd->data[2] = lower_32_bits(address); + cmd->data[3] = upper_32_bits(address); + cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; + cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; + if (size) + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; + CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); +} + +static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid, + int qdep, u64 address, bool size) +{ + memset(cmd, 0, sizeof(*cmd)); + + address &= ~(0xfffULL); + + cmd->data[0] = devid; + cmd->data[0] |= (pasid & 0xff) << 16; + cmd->data[0] |= (qdep & 0xff) << 24; + cmd->data[1] = devid; + cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16; + cmd->data[2] = lower_32_bits(address); + cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; + cmd->data[3] = upper_32_bits(address); + if (size) + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; + CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); +} + +static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid, + int status, int tag, bool gn) +{ + memset(cmd, 0, sizeof(*cmd)); + + cmd->data[0] = devid; + if (gn) { + cmd->data[1] = pasid & PASID_MASK; + cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; + } + cmd->data[3] = tag & 0x1ff; + cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT; + + CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR); +} + static void build_inv_all(struct iommu_cmd *cmd) { memset(cmd, 0, sizeof(*cmd)); @@ -1514,6 +1691,48 @@ static void free_pagetable(struct protection_domain *domain) domain->pt_root = NULL; } +static void free_gcr3_tbl_level1(u64 *tbl) +{ + u64 *ptr; + int i; + + for (i = 0; i < 512; ++i) { + if (!(tbl[i] & GCR3_VALID)) + continue; + + ptr = __va(tbl[i] & PAGE_MASK); + + free_page((unsigned long)ptr); + } +} + +static void free_gcr3_tbl_level2(u64 *tbl) +{ + u64 *ptr; + int i; + + for (i = 0; i < 512; ++i) { + if (!(tbl[i] & GCR3_VALID)) + continue; + + ptr = __va(tbl[i] & PAGE_MASK); + + free_gcr3_tbl_level1(ptr); + } +} + +static void free_gcr3_table(struct protection_domain *domain) +{ + if (domain->glx == 2) + free_gcr3_tbl_level2(domain->gcr3_tbl); + else if (domain->glx == 1) + 
free_gcr3_tbl_level1(domain->gcr3_tbl); + else if (domain->glx != 0) + BUG(); + + free_page((unsigned long)domain->gcr3_tbl); +} + /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table @@ -1600,20 +1819,52 @@ static bool dma_ops_domain(struct protection_domain *domain) static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) { - u64 pte_root = virt_to_phys(domain->pt_root); - u32 flags = 0; + u64 pte_root = 0; + u64 flags = 0; + + if (domain->mode != PAGE_MODE_NONE) + pte_root = virt_to_phys(domain->pt_root); pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; + flags = amd_iommu_dev_table[devid].data[1]; + if (ats) flags |= DTE_FLAG_IOTLB; - amd_iommu_dev_table[devid].data[3] |= flags; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); - amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); + if (domain->flags & PD_IOMMUV2_MASK) { + u64 gcr3 = __pa(domain->gcr3_tbl); + u64 glx = domain->glx; + u64 tmp; + + pte_root |= DTE_FLAG_GV; + pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT; + + /* First mask out possible old values for GCR3 table */ + tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; + flags &= ~tmp; + + tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; + flags &= ~tmp; + + /* Encode GCR3 table into DTE */ + tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A; + pte_root |= tmp; + + tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B; + flags |= tmp; + + tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C; + flags |= tmp; + } + + flags &= ~(0xffffUL); + flags |= domain->id; + + amd_iommu_dev_table[devid].data[1] = flags; + amd_iommu_dev_table[devid].data[0] = pte_root; } static void clear_dte_entry(u16 devid) @@ -1621,7 +1872,6 @@ static void clear_dte_entry(u16 devid) /* remove entry from the device table seen by the hardware */ amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; amd_iommu_dev_table[devid].data[1] = 0; - amd_iommu_dev_table[devid].data[2] = 0; amd_iommu_apply_erratum_63(devid); } @@ -1714,6 +1964,93 @@ out_unlock: return ret; } + +static void pdev_iommuv2_disable(struct pci_dev *pdev) +{ + pci_disable_ats(pdev); + pci_disable_pri(pdev); + pci_disable_pasid(pdev); +} + +/* FIXME: Change generic reset-function to do the same */ +static int pri_reset_while_enabled(struct pci_dev *pdev) +{ + u16 control; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); + if (!pos) + return -EINVAL; + + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); + control |= PCI_PRI_RESET; + pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control); + + return 0; +} + +static int pdev_iommuv2_enable(struct pci_dev *pdev) +{ + bool reset_enable; + int reqs, ret; + + /* FIXME: Hardcode number of outstanding requests for now */ + reqs = 32; + if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE)) + reqs = 1; + reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET); + + /* Only allow access to user-accessible pages */ + ret = pci_enable_pasid(pdev, 0); + if (ret) + goto out_err; + + /* First reset the PRI state of the device */ + ret = pci_reset_pri(pdev); + if (ret) + goto out_err; + + /* Enable PRI */ + ret = pci_enable_pri(pdev, reqs); + if (ret) + goto out_err; + + if (reset_enable) { + ret = pri_reset_while_enabled(pdev); + if (ret) + goto out_err; + } + + ret = 
pci_enable_ats(pdev, PAGE_SHIFT); + if (ret) + goto out_err; + + return 0; + +out_err: + pci_disable_pri(pdev); + pci_disable_pasid(pdev); + + return ret; +} + +/* FIXME: Move this to PCI code */ +#define PCI_PRI_TLP_OFF (1 << 2) + +bool pci_pri_tlp_required(struct pci_dev *pdev) +{ + u16 control; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_PRI_CAP); + if (!pos) + return false; + + pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control); + + return (control & PCI_PRI_TLP_OFF) ? true : false; +} + /* * If a device is not yet associated with a domain, this function does * assigns it visible for the hardware @@ -1728,7 +2065,18 @@ static int attach_device(struct device *dev, dev_data = get_dev_data(dev); - if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) { + if (domain->flags & PD_IOMMUV2_MASK) { + if (!dev_data->iommu_v2 || !dev_data->passthrough) + return -EINVAL; + + if (pdev_iommuv2_enable(pdev) != 0) + return -EINVAL; + + dev_data->ats.enabled = true; + dev_data->ats.qdep = pci_ats_queue_depth(pdev); + dev_data->pri_tlp = pci_pri_tlp_required(pdev); + } else if (amd_iommu_iotlb_sup && + pci_enable_ats(pdev, PAGE_SHIFT) == 0) { dev_data->ats.enabled = true; dev_data->ats.qdep = pci_ats_queue_depth(pdev); } @@ -1778,7 +2126,7 @@ static void __detach_device(struct iommu_dev_data *dev_data) * passthrough domain if it is detached from any other domain. * Make sure we can deassign from the pt_domain itself. */ - if (iommu_pass_through && + if (dev_data->passthrough && (dev_data->domain == NULL && domain != pt_domain)) __attach_device(dev_data, pt_domain); } @@ -1788,20 +2136,24 @@ static void __detach_device(struct iommu_dev_data *dev_data) */ static void detach_device(struct device *dev) { + struct protection_domain *domain; struct iommu_dev_data *dev_data; unsigned long flags; dev_data = get_dev_data(dev); + domain = dev_data->domain; /* lock device table */ write_lock_irqsave(&amd_iommu_devtable_lock, flags); __detach_device(dev_data); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - if (dev_data->ats.enabled) { + if (domain->flags & PD_IOMMUV2_MASK) + pdev_iommuv2_disable(to_pci_dev(dev)); + else if (dev_data->ats.enabled) pci_disable_ats(to_pci_dev(dev)); - dev_data->ats.enabled = false; - } + + dev_data->ats.enabled = false; } /* @@ -1836,18 +2188,20 @@ static struct protection_domain *domain_for_device(struct device *dev) static int device_change_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct device *dev = data; - u16 devid; - struct protection_domain *domain; struct dma_ops_domain *dma_domain; + struct protection_domain *domain; + struct iommu_dev_data *dev_data; + struct device *dev = data; struct amd_iommu *iommu; unsigned long flags; + u16 devid; if (!check_device(dev)) return 0; - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; + devid = get_device_id(dev); + iommu = amd_iommu_rlookup_table[devid]; + dev_data = get_dev_data(dev); switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: @@ -1856,7 +2210,7 @@ static int device_change_notifier(struct notifier_block *nb, if (!domain) goto out; - if (iommu_pass_through) + if (dev_data->passthrough) break; detach_device(dev); break; @@ -2452,8 +2806,9 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) */ static void prealloc_protection_domains(void) { - struct pci_dev *dev = NULL; + struct iommu_dev_data *dev_data; struct dma_ops_domain *dma_dom; + struct pci_dev *dev = NULL; u16 devid; for_each_pci_dev(dev) { @@ -2462,6 
+2817,16 @@ static void prealloc_protection_domains(void) if (!check_device(&dev->dev)) continue; + dev_data = get_dev_data(&dev->dev); + if (!amd_iommu_force_isolation && dev_data->iommu_v2) { + /* Make sure passthrough domain is allocated */ + alloc_passthrough_domain(); + dev_data->passthrough = true; + attach_device(&dev->dev, pt_domain); + pr_info("AMD-Vi: Using passthough domain for device %s\n", + dev_name(&dev->dev)); + } + /* Is there already any domain for it? */ if (domain_for_device(&dev->dev)) continue; @@ -2492,6 +2857,7 @@ static struct dma_map_ops amd_iommu_dma_ops = { static unsigned device_dma_ops_init(void) { + struct iommu_dev_data *dev_data; struct pci_dev *pdev = NULL; unsigned unhandled = 0; @@ -2501,7 +2867,12 @@ static unsigned device_dma_ops_init(void) continue; } - pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops; + dev_data = get_dev_data(&pdev->dev); + + if (!dev_data->passthrough) + pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops; + else + pdev->dev.archdata.dma_ops = &nommu_dma_ops; } return unhandled; @@ -2628,6 +2999,20 @@ out_err: return NULL; } +static int __init alloc_passthrough_domain(void) +{ + if (pt_domain != NULL) + return 0; + + /* allocate passthrough domain */ + pt_domain = protection_domain_alloc(); + if (!pt_domain) + return -ENOMEM; + + pt_domain->mode = PAGE_MODE_NONE; + + return 0; +} static int amd_iommu_domain_init(struct iommu_domain *dom) { struct protection_domain *domain; @@ -2641,6 +3026,8 @@ static int amd_iommu_domain_init(struct iommu_domain *dom) if (!domain->pt_root) goto out_free; + domain->iommu_domain = dom; + dom->priv = domain; return 0; @@ -2663,7 +3050,11 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom) BUG_ON(domain->dev_cnt != 0); - free_pagetable(domain); + if (domain->mode != PAGE_MODE_NONE) + free_pagetable(domain); + + if (domain->flags & PD_IOMMUV2_MASK) + free_gcr3_table(domain); protection_domain_free(domain); @@ -2726,6 +3117,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, int prot = 0; int ret; + if (domain->mode == PAGE_MODE_NONE) + return -EINVAL; + if (iommu_prot & IOMMU_READ) prot |= IOMMU_PROT_IR; if (iommu_prot & IOMMU_WRITE) @@ -2744,6 +3138,9 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, struct protection_domain *domain = dom->priv; size_t unmap_size; + if (domain->mode == PAGE_MODE_NONE) + return -EINVAL; + mutex_lock(&domain->api_lock); unmap_size = iommu_unmap_page(domain, iova, page_size); mutex_unlock(&domain->api_lock); @@ -2761,6 +3158,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, phys_addr_t paddr; u64 *pte, __pte; + if (domain->mode == PAGE_MODE_NONE) + return iova; + pte = fetch_pte(domain, iova); if (!pte || !IOMMU_PTE_PRESENT(*pte)) @@ -2812,21 +3212,23 @@ static struct iommu_ops amd_iommu_ops = { int __init amd_iommu_init_passthrough(void) { - struct amd_iommu *iommu; + struct iommu_dev_data *dev_data; struct pci_dev *dev = NULL; + struct amd_iommu *iommu; u16 devid; + int ret; - /* allocate passthrough domain */ - pt_domain = protection_domain_alloc(); - if (!pt_domain) - return -ENOMEM; - - pt_domain->mode |= PAGE_MODE_NONE; + ret = alloc_passthrough_domain(); + if (ret) + return ret; for_each_pci_dev(dev) { if (!check_device(&dev->dev)) continue; + dev_data = get_dev_data(&dev->dev); + dev_data->passthrough = true; + devid = get_device_id(&dev->dev); iommu = amd_iommu_rlookup_table[devid]; @@ -2840,3 +3242,326 @@ int __init amd_iommu_init_passthrough(void) return 0; } + +/* IOMMUv2 
specific functions */ +int amd_iommu_register_ppr_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&ppr_notifier, nb); +} +EXPORT_SYMBOL(amd_iommu_register_ppr_notifier); + +int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&ppr_notifier, nb); +} +EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier); + +void amd_iommu_domain_direct_map(struct iommu_domain *dom) +{ + struct protection_domain *domain = dom->priv; + unsigned long flags; + + spin_lock_irqsave(&domain->lock, flags); + + /* Update data structure */ + domain->mode = PAGE_MODE_NONE; + domain->updated = true; + + /* Make changes visible to IOMMUs */ + update_domain(domain); + + /* Page-table is not visible to IOMMU anymore, so free it */ + free_pagetable(domain); + + spin_unlock_irqrestore(&domain->lock, flags); +} +EXPORT_SYMBOL(amd_iommu_domain_direct_map); + +int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids) +{ + struct protection_domain *domain = dom->priv; + unsigned long flags; + int levels, ret; + + if (pasids <= 0 || pasids > (PASID_MASK + 1)) + return -EINVAL; + + /* Number of GCR3 table levels required */ + for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9) + levels += 1; + + if (levels > amd_iommu_max_glx_val) + return -EINVAL; + + spin_lock_irqsave(&domain->lock, flags); + + /* + * Save us all sanity checks whether devices already in the + * domain support IOMMUv2. Just force that the domain has no + * devices attached when it is switched into IOMMUv2 mode. + */ + ret = -EBUSY; + if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) + goto out; + + ret = -ENOMEM; + domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); + if (domain->gcr3_tbl == NULL) + goto out; + + domain->glx = levels; + domain->flags |= PD_IOMMUV2_MASK; + domain->updated = true; + + update_domain(domain); + + ret = 0; + +out: + spin_unlock_irqrestore(&domain->lock, flags); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_domain_enable_v2); + +static int __flush_pasid(struct protection_domain *domain, int pasid, + u64 address, bool size) +{ + struct iommu_dev_data *dev_data; + struct iommu_cmd cmd; + int i, ret; + + if (!(domain->flags & PD_IOMMUV2_MASK)) + return -EINVAL; + + build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); + + /* + * IOMMU TLB needs to be flushed before Device TLB to + * prevent device TLB refill from IOMMU TLB + */ + for (i = 0; i < amd_iommus_present; ++i) { + if (domain->dev_iommu[i] == 0) + continue; + + ret = iommu_queue_command(amd_iommus[i], &cmd); + if (ret != 0) + goto out; + } + + /* Wait until IOMMU TLB flushes are complete */ + domain_flush_complete(domain); + + /* Now flush device TLBs */ + list_for_each_entry(dev_data, &domain->dev_list, list) { + struct amd_iommu *iommu; + int qdep; + + BUG_ON(!dev_data->ats.enabled); + + qdep = dev_data->ats.qdep; + iommu = amd_iommu_rlookup_table[dev_data->devid]; + + build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, + qdep, address, size); + + ret = iommu_queue_command(iommu, &cmd); + if (ret != 0) + goto out; + } + + /* Wait until all device TLBs are flushed */ + domain_flush_complete(domain); + + ret = 0; + +out: + + return ret; +} + +static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, + u64 address) +{ + INC_STATS_COUNTER(invalidate_iotlb); + + return __flush_pasid(domain, pasid, address, false); +} + +int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, + u64 address) +{ + struct protection_domain *domain = dom->priv; 
+ unsigned long flags; + int ret; + + spin_lock_irqsave(&domain->lock, flags); + ret = __amd_iommu_flush_page(domain, pasid, address); + spin_unlock_irqrestore(&domain->lock, flags); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_flush_page); + +static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) +{ + INC_STATS_COUNTER(invalidate_iotlb_all); + + return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + true); +} + +int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid) +{ + struct protection_domain *domain = dom->priv; + unsigned long flags; + int ret; + + spin_lock_irqsave(&domain->lock, flags); + ret = __amd_iommu_flush_tlb(domain, pasid); + spin_unlock_irqrestore(&domain->lock, flags); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_flush_tlb); + +static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc) +{ + int index; + u64 *pte; + + while (true) { + + index = (pasid >> (9 * level)) & 0x1ff; + pte = &root[index]; + + if (level == 0) + break; + + if (!(*pte & GCR3_VALID)) { + if (!alloc) + return NULL; + + root = (void *)get_zeroed_page(GFP_ATOMIC); + if (root == NULL) + return NULL; + + *pte = __pa(root) | GCR3_VALID; + } + + root = __va(*pte & PAGE_MASK); + + level -= 1; + } + + return pte; +} + +static int __set_gcr3(struct protection_domain *domain, int pasid, + unsigned long cr3) +{ + u64 *pte; + + if (domain->mode != PAGE_MODE_NONE) + return -EINVAL; + + pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); + if (pte == NULL) + return -ENOMEM; + + *pte = (cr3 & PAGE_MASK) | GCR3_VALID; + + return __amd_iommu_flush_tlb(domain, pasid); +} + +static int __clear_gcr3(struct protection_domain *domain, int pasid) +{ + u64 *pte; + + if (domain->mode != PAGE_MODE_NONE) + return -EINVAL; + + pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); + if (pte == NULL) + return 0; + + *pte = 0; + + return __amd_iommu_flush_tlb(domain, pasid); +} + +int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, + unsigned long cr3) +{ + struct protection_domain *domain = dom->priv; + unsigned long flags; + int ret; + + spin_lock_irqsave(&domain->lock, flags); + ret = __set_gcr3(domain, pasid, cr3); + spin_unlock_irqrestore(&domain->lock, flags); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_domain_set_gcr3); + +int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid) +{ + struct protection_domain *domain = dom->priv; + unsigned long flags; + int ret; + + spin_lock_irqsave(&domain->lock, flags); + ret = __clear_gcr3(domain, pasid); + spin_unlock_irqrestore(&domain->lock, flags); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3); + +int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, + int status, int tag) +{ + struct iommu_dev_data *dev_data; + struct amd_iommu *iommu; + struct iommu_cmd cmd; + + INC_STATS_COUNTER(complete_ppr); + + dev_data = get_dev_data(&pdev->dev); + iommu = amd_iommu_rlookup_table[dev_data->devid]; + + build_complete_ppr(&cmd, dev_data->devid, pasid, status, + tag, dev_data->pri_tlp); + + return iommu_queue_command(iommu, &cmd); +} +EXPORT_SYMBOL(amd_iommu_complete_ppr); + +struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev) +{ + struct protection_domain *domain; + + domain = get_domain(&pdev->dev); + if (IS_ERR(domain)) + return NULL; + + /* Only return IOMMUv2 domains */ + if (!(domain->flags & PD_IOMMUV2_MASK)) + return NULL; + + return domain->iommu_domain; +} +EXPORT_SYMBOL(amd_iommu_get_v2_domain); + +void amd_iommu_enable_device_erratum(struct 
pci_dev *pdev, u32 erratum) +{ + struct iommu_dev_data *dev_data; + + if (!amd_iommu_v2_supported()) + return; + + dev_data = get_dev_data(&pdev->dev); + dev_data->errata |= (1 << erratum); +} +EXPORT_SYMBOL(amd_iommu_enable_device_erratum); diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 82d2410f4205..c7a5d7e14547 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/msi.h> #include <linux/amd-iommu.h> +#include <linux/export.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> @@ -141,6 +142,12 @@ int amd_iommus_present; bool amd_iommu_np_cache __read_mostly; bool amd_iommu_iotlb_sup __read_mostly = true; +u32 amd_iommu_max_pasids __read_mostly = ~0; + +bool amd_iommu_v2_present __read_mostly; + +bool amd_iommu_force_isolation __read_mostly; + /* * The ACPI table parsing functions set this variable on an error */ @@ -581,21 +588,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu) free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); } +/* allocates the memory where the IOMMU will log its events to */ +static u8 * __init alloc_ppr_log(struct amd_iommu *iommu) +{ + iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(PPR_LOG_SIZE)); + + if (iommu->ppr_log == NULL) + return NULL; + + return iommu->ppr_log; +} + +static void iommu_enable_ppr_log(struct amd_iommu *iommu) +{ + u64 entry; + + if (iommu->ppr_log == NULL) + return; + + entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; + + memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, + &entry, sizeof(entry)); + + /* set head and tail to zero manually */ + writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); + iommu_feature_enable(iommu, CONTROL_PPR_EN); +} + +static void __init free_ppr_log(struct amd_iommu *iommu) +{ + if (iommu->ppr_log == NULL) + return; + + free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); +} + +static void iommu_enable_gt(struct amd_iommu *iommu) +{ + if (!iommu_feature(iommu, FEATURE_GT)) + return; + + iommu_feature_enable(iommu, CONTROL_GT_EN); +} + /* sets a specific bit in the device table entry. 
*/ static void set_dev_entry_bit(u16 devid, u8 bit) { - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; + int i = (bit >> 6) & 0x03; + int _bit = bit & 0x3f; - amd_iommu_dev_table[devid].data[i] |= (1 << _bit); + amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); } static int get_dev_entry_bit(u16 devid, u8 bit) { - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; + int i = (bit >> 6) & 0x03; + int _bit = bit & 0x3f; - return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit; + return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; } @@ -699,6 +754,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) iommu->features = ((u64)high << 32) | low; + if (iommu_feature(iommu, FEATURE_GT)) { + int glxval; + u32 pasids; + u64 shift; + + shift = iommu->features & FEATURE_PASID_MASK; + shift >>= FEATURE_PASID_SHIFT; + pasids = (1 << shift); + + amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids); + + glxval = iommu->features & FEATURE_GLXVAL_MASK; + glxval >>= FEATURE_GLXVAL_SHIFT; + + if (amd_iommu_max_glx_val == -1) + amd_iommu_max_glx_val = glxval; + else + amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); + } + + if (iommu_feature(iommu, FEATURE_GT) && + iommu_feature(iommu, FEATURE_PPR)) { + iommu->is_iommu_v2 = true; + amd_iommu_v2_present = true; + } + if (!is_rd890_iommu(iommu->dev)) return; @@ -901,6 +982,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu) { free_command_buffer(iommu); free_event_buffer(iommu); + free_ppr_log(iommu); iommu_unmap_mmio_space(iommu); } @@ -964,6 +1046,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) init_iommu_from_acpi(iommu, h); init_iommu_devices(iommu); + if (iommu_feature(iommu, FEATURE_PPR)) { + iommu->ppr_log = alloc_ppr_log(iommu); + if (!iommu->ppr_log) + return -ENOMEM; + } + if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) amd_iommu_np_cache = true; @@ -1050,6 +1138,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu) iommu->int_enabled = true; iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); + if (iommu->ppr_log != NULL) + iommu_feature_enable(iommu, CONTROL_PPFINT_EN); + return 0; } @@ -1274,6 +1365,8 @@ static void enable_iommus(void) iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); + iommu_enable_ppr_log(iommu); + iommu_enable_gt(iommu); iommu_set_exclusion_range(iommu); iommu_init_msi(iommu); iommu_enable(iommu); @@ -1560,6 +1653,8 @@ static int __init parse_amd_iommu_options(char *str) amd_iommu_unmap_flush = true; if (strncmp(str, "off", 3) == 0) amd_iommu_disabled = true; + if (strncmp(str, "force_isolation", 15) == 0) + amd_iommu_force_isolation = true; } return 1; @@ -1572,3 +1667,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect, gart_iommu_hole_init, 0, 0); + +bool amd_iommu_v2_supported(void) +{ + return amd_iommu_v2_present; +} +EXPORT_SYMBOL(amd_iommu_v2_supported); diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 7ffaa64410b0..1a7f41c6cc66 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h @@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); extern void amd_iommu_init_notifier(void); extern void amd_iommu_init_api(void); + +/* IOMMUv2 specific functions */ +struct iommu_domain; + +extern bool amd_iommu_v2_supported(void); +extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb); +extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb); 
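The declarations added here, together with the exports in the new amd_iommu_v2.c further down in this diff, make up the IOMMUv2/PASID interface offered to device drivers. For orientation only, a hypothetical consumer might use it as in the sketch below; the device pointer, the PASID number and the callback body are illustrative assumptions, and the callback prototype is assumed to match the one declared in <linux/amd-iommu.h>.

#include <linux/amd-iommu.h>
#include <linux/pci.h>
#include <linux/sched.h>

/* Called for PPR faults the IOMMU core could not resolve itself (assumed prototype) */
static int my_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
                             unsigned long address, u16 flags)
{
        return AMD_IOMMU_INV_PRI_RSP_INVALID;
}

static int my_setup_pasid(struct pci_dev *my_dev)
{
        int ret;

        /* Allocate per-device IOMMUv2 state for up to 16 PASIDs */
        ret = amd_iommu_init_device(my_dev, 16);
        if (ret)
                return ret;

        /* Optional callback for faults that cannot be handled by the core */
        amd_iommu_set_invalid_ppr_cb(my_dev, my_invalid_ppr_cb);

        /* Bind PASID 1 of the device to the current process' address space */
        ret = amd_iommu_bind_pasid(my_dev, 1, current);
        if (ret) {
                amd_iommu_free_device(my_dev);
                return ret;
        }

        return 0;
}

static void my_teardown_pasid(struct pci_dev *my_dev)
{
        amd_iommu_unbind_pasid(my_dev, 1);
        amd_iommu_free_device(my_dev);
}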
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom); +extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids); +extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid, + u64 address); +extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid); +extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, + unsigned long cr3); +extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid); +extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev); + +#define PPR_SUCCESS 0x0 +#define PPR_INVALID 0x1 +#define PPR_FAILURE 0xf + +extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid, + int status, int tag); + #ifndef CONFIG_AMD_IOMMU_STATS static inline void amd_iommu_stats_init(void) { } diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 5b9c5075e81a..6ad8b10b3130 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -69,11 +69,14 @@ #define MMIO_EXCL_BASE_OFFSET 0x0020 #define MMIO_EXCL_LIMIT_OFFSET 0x0028 #define MMIO_EXT_FEATURES 0x0030 +#define MMIO_PPR_LOG_OFFSET 0x0038 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define MMIO_EVT_HEAD_OFFSET 0x2010 #define MMIO_EVT_TAIL_OFFSET 0x2018 #define MMIO_STATUS_OFFSET 0x2020 +#define MMIO_PPR_HEAD_OFFSET 0x2030 +#define MMIO_PPR_TAIL_OFFSET 0x2038 /* Extended Feature Bits */ @@ -87,8 +90,17 @@ #define FEATURE_HE (1ULL<<8) #define FEATURE_PC (1ULL<<9) +#define FEATURE_PASID_SHIFT 32 +#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT) + +#define FEATURE_GLXVAL_SHIFT 14 +#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT) + +#define PASID_MASK 0x000fffff + /* MMIO status bits */ -#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 +#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) +#define MMIO_STATUS_PPR_INT_MASK (1 << 6) /* event logging constants */ #define EVENT_ENTRY_SIZE 0x10 @@ -122,18 +134,25 @@ #define CONTROL_CMDBUF_EN 0x0cULL #define CONTROL_PPFLOG_EN 0x0dULL #define CONTROL_PPFINT_EN 0x0eULL +#define CONTROL_PPR_EN 0x0fULL +#define CONTROL_GT_EN 0x10ULL /* command specific defines */ #define CMD_COMPL_WAIT 0x01 #define CMD_INV_DEV_ENTRY 0x02 #define CMD_INV_IOMMU_PAGES 0x03 #define CMD_INV_IOTLB_PAGES 0x04 +#define CMD_COMPLETE_PPR 0x07 #define CMD_INV_ALL 0x08 #define CMD_COMPL_WAIT_STORE_MASK 0x01 #define CMD_COMPL_WAIT_INT_MASK 0x02 #define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 #define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 +#define CMD_INV_IOMMU_PAGES_GN_MASK 0x04 + +#define PPR_STATUS_MASK 0xf +#define PPR_STATUS_SHIFT 12 #define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL @@ -165,6 +184,23 @@ #define EVT_BUFFER_SIZE 8192 /* 512 entries */ #define EVT_LEN_MASK (0x9ULL << 56) +/* Constants for PPR Log handling */ +#define PPR_LOG_ENTRIES 512 +#define PPR_LOG_SIZE_SHIFT 56 +#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT) +#define PPR_ENTRY_SIZE 16 +#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES) + +#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL) +#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL) +#define PPR_DEVID(x) ((x) & 0xffffULL) +#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL) +#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL) +#define PPR_PASID2(x) (((x) >> 42) & 0xfULL) +#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x)) + +#define PPR_REQ_FAULT 0x01 + #define PAGE_MODE_NONE 0x00 #define PAGE_MODE_1_LEVEL 0x01 #define PAGE_MODE_2_LEVEL 0x02 @@ -230,7 +266,24 @@ #define IOMMU_PTE_IR (1ULL << 61) 
#define IOMMU_PTE_IW (1ULL << 62) -#define DTE_FLAG_IOTLB 0x01 +#define DTE_FLAG_IOTLB (0x01UL << 32) +#define DTE_FLAG_GV (0x01ULL << 55) +#define DTE_GLX_SHIFT (56) +#define DTE_GLX_MASK (3) + +#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) +#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) +#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) + +#define DTE_GCR3_INDEX_A 0 +#define DTE_GCR3_INDEX_B 1 +#define DTE_GCR3_INDEX_C 1 + +#define DTE_GCR3_SHIFT_A 58 +#define DTE_GCR3_SHIFT_B 16 +#define DTE_GCR3_SHIFT_C 43 + +#define GCR3_VALID 0x01ULL #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) @@ -257,6 +310,7 @@ domain for an IOMMU */ #define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page translation */ +#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */ extern bool amd_iommu_dump; #define DUMP_printk(format, arg...) \ @@ -285,6 +339,29 @@ extern bool amd_iommu_iotlb_sup; #define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT) #define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL) + +/* + * This struct is used to pass information about + * incoming PPR faults around. + */ +struct amd_iommu_fault { + u64 address; /* IO virtual address of the fault*/ + u32 pasid; /* Address space identifier */ + u16 device_id; /* Originating PCI device id */ + u16 tag; /* PPR tag */ + u16 flags; /* Fault flags */ + +}; + +#define PPR_FAULT_EXEC (1 << 1) +#define PPR_FAULT_READ (1 << 2) +#define PPR_FAULT_WRITE (1 << 5) +#define PPR_FAULT_USER (1 << 6) +#define PPR_FAULT_RSVD (1 << 7) +#define PPR_FAULT_GN (1 << 8) + +struct iommu_domain; + /* * This structure contains generic data for IOMMU protection domains * independent of their use. @@ -297,11 +374,15 @@ struct protection_domain { u16 id; /* the domain id written to the device table */ int mode; /* paging mode (0-6 levels) */ u64 *pt_root; /* page table root pointer */ + int glx; /* Number of levels for GCR3 table */ + u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ bool updated; /* complete domain flush required */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ void *priv; /* private data */ + struct iommu_domain *iommu_domain; /* Pointer to generic + domain structure */ }; @@ -315,10 +396,15 @@ struct iommu_dev_data { struct protection_domain *domain; /* Domain the device is bound to */ atomic_t bind; /* Domain attach reverent count */ u16 devid; /* PCI Device ID */ + bool iommu_v2; /* Device can make use of IOMMUv2 */ + bool passthrough; /* Default for device is pt_domain */ struct { bool enabled; int qdep; } ats; /* ATS state */ + bool pri_tlp; /* PASID TLB required for + PPR completions */ + u32 errata; /* Bitmap for errata to apply */ }; /* @@ -399,6 +485,9 @@ struct amd_iommu { /* Extended features */ u64 features; + /* IOMMUv2 */ + bool is_iommu_v2; + /* * Capability pointer. 
There could be more than one IOMMU per PCI * device function if there are more than one AMD IOMMU capability @@ -431,6 +520,9 @@ struct amd_iommu { /* MSI number for event interrupt */ u16 evt_msi_num; + /* Base of the PPR log, if present */ + u8 *ppr_log; + /* true if interrupts for this IOMMU are already enabled */ bool int_enabled; @@ -484,7 +576,7 @@ extern struct list_head amd_iommu_pd_list; * Structure defining one entry in the device table */ struct dev_table_entry { - u32 data[8]; + u64 data[4]; }; /* @@ -549,6 +641,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap; */ extern bool amd_iommu_unmap_flush; +/* Smallest number of PASIDs supported by any IOMMU in the system */ +extern u32 amd_iommu_max_pasids; + +extern bool amd_iommu_v2_present; + +extern bool amd_iommu_force_isolation; + +/* Max levels of glxval supported */ +extern int amd_iommu_max_glx_val; + /* takes bus and device/function and returns the device id * FIXME: should that be in generic PCI code? */ static inline u16 calc_devid(u8 bus, u8 devfn) diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c new file mode 100644 index 000000000000..fe812e2a0474 --- /dev/null +++ b/drivers/iommu/amd_iommu_v2.c @@ -0,0 +1,959 @@ +/* + * Copyright (C) 2010-2012 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/mmu_notifier.h> +#include <linux/amd-iommu.h> +#include <linux/mm_types.h> +#include <linux/profile.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/iommu.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/gfp.h> + +#include "amd_iommu_types.h" +#include "amd_iommu_proto.h" + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>"); + +#define MAX_DEVICES 0x10000 +#define PRI_QUEUE_SIZE 512 + +struct pri_queue { + atomic_t inflight; + bool finish; + int status; +}; + +struct pasid_state { + struct list_head list; /* For global state-list */ + atomic_t count; /* Reference count */ + struct task_struct *task; /* Task bound to this PASID */ + struct mm_struct *mm; /* mm_struct for the faults */ + struct mmu_notifier mn; /* mmu_otifier handle */ + struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ + struct device_state *device_state; /* Link to our device_state */ + int pasid; /* PASID index */ + spinlock_t lock; /* Protect pri_queues */ + wait_queue_head_t wq; /* To wait for count == 0 */ +}; + +struct device_state { + atomic_t count; + struct pci_dev *pdev; + struct pasid_state **states; + struct iommu_domain *domain; + int pasid_levels; + int max_pasids; + amd_iommu_invalid_ppr_cb inv_ppr_cb; + spinlock_t lock; + wait_queue_head_t wq; +}; + +struct fault { + struct work_struct work; + struct device_state *dev_state; + struct pasid_state *state; + struct mm_struct *mm; + u64 address; + u16 devid; + u16 pasid; + u16 tag; + u16 finish; + u16 flags; +}; + +struct device_state **state_table; +static spinlock_t state_lock; + +/* List and lock for all pasid_states */ +static LIST_HEAD(pasid_state_list); +static DEFINE_SPINLOCK(ps_lock); + +static struct workqueue_struct *iommu_wq; + +/* + * Empty page table - Used between + * mmu_notifier_invalidate_range_start and + * mmu_notifier_invalidate_range_end + */ +static u64 *empty_page_table; + +static void free_pasid_states(struct device_state *dev_state); +static void unbind_pasid(struct device_state *dev_state, int pasid); +static int task_exit(struct notifier_block *nb, unsigned long e, void *data); + +static u16 device_id(struct pci_dev *pdev) +{ + u16 devid; + + devid = pdev->bus->number; + devid = (devid << 8) | pdev->devfn; + + return devid; +} + +static struct device_state *get_device_state(u16 devid) +{ + struct device_state *dev_state; + unsigned long flags; + + spin_lock_irqsave(&state_lock, flags); + dev_state = state_table[devid]; + if (dev_state != NULL) + atomic_inc(&dev_state->count); + spin_unlock_irqrestore(&state_lock, flags); + + return dev_state; +} + +static void free_device_state(struct device_state *dev_state) +{ + /* + * First detach device from domain - No more PRI requests will arrive + * from that device after it is unbound from the IOMMUv2 domain. 
+ */ + iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); + + /* Everything is down now, free the IOMMUv2 domain */ + iommu_domain_free(dev_state->domain); + + /* Finally get rid of the device-state */ + kfree(dev_state); +} + +static void put_device_state(struct device_state *dev_state) +{ + if (atomic_dec_and_test(&dev_state->count)) + wake_up(&dev_state->wq); +} + +static void put_device_state_wait(struct device_state *dev_state) +{ + DEFINE_WAIT(wait); + + prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE); + if (!atomic_dec_and_test(&dev_state->count)) + schedule(); + finish_wait(&dev_state->wq, &wait); + + free_device_state(dev_state); +} + +static struct notifier_block profile_nb = { + .notifier_call = task_exit, +}; + +static void link_pasid_state(struct pasid_state *pasid_state) +{ + spin_lock(&ps_lock); + list_add_tail(&pasid_state->list, &pasid_state_list); + spin_unlock(&ps_lock); +} + +static void __unlink_pasid_state(struct pasid_state *pasid_state) +{ + list_del(&pasid_state->list); +} + +static void unlink_pasid_state(struct pasid_state *pasid_state) +{ + spin_lock(&ps_lock); + __unlink_pasid_state(pasid_state); + spin_unlock(&ps_lock); +} + +/* Must be called under dev_state->lock */ +static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, + int pasid, bool alloc) +{ + struct pasid_state **root, **ptr; + int level, index; + + level = dev_state->pasid_levels; + root = dev_state->states; + + while (true) { + + index = (pasid >> (9 * level)) & 0x1ff; + ptr = &root[index]; + + if (level == 0) + break; + + if (*ptr == NULL) { + if (!alloc) + return NULL; + + *ptr = (void *)get_zeroed_page(GFP_ATOMIC); + if (*ptr == NULL) + return NULL; + } + + root = (struct pasid_state **)*ptr; + level -= 1; + } + + return ptr; +} + +static int set_pasid_state(struct device_state *dev_state, + struct pasid_state *pasid_state, + int pasid) +{ + struct pasid_state **ptr; + unsigned long flags; + int ret; + + spin_lock_irqsave(&dev_state->lock, flags); + ptr = __get_pasid_state_ptr(dev_state, pasid, true); + + ret = -ENOMEM; + if (ptr == NULL) + goto out_unlock; + + ret = -ENOMEM; + if (*ptr != NULL) + goto out_unlock; + + *ptr = pasid_state; + + ret = 0; + +out_unlock: + spin_unlock_irqrestore(&dev_state->lock, flags); + + return ret; +} + +static void clear_pasid_state(struct device_state *dev_state, int pasid) +{ + struct pasid_state **ptr; + unsigned long flags; + + spin_lock_irqsave(&dev_state->lock, flags); + ptr = __get_pasid_state_ptr(dev_state, pasid, true); + + if (ptr == NULL) + goto out_unlock; + + *ptr = NULL; + +out_unlock: + spin_unlock_irqrestore(&dev_state->lock, flags); +} + +static struct pasid_state *get_pasid_state(struct device_state *dev_state, + int pasid) +{ + struct pasid_state **ptr, *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&dev_state->lock, flags); + ptr = __get_pasid_state_ptr(dev_state, pasid, false); + + if (ptr == NULL) + goto out_unlock; + + ret = *ptr; + if (ret) + atomic_inc(&ret->count); + +out_unlock: + spin_unlock_irqrestore(&dev_state->lock, flags); + + return ret; +} + +static void free_pasid_state(struct pasid_state *pasid_state) +{ + kfree(pasid_state); +} + +static void put_pasid_state(struct pasid_state *pasid_state) +{ + if (atomic_dec_and_test(&pasid_state->count)) { + put_device_state(pasid_state->device_state); + wake_up(&pasid_state->wq); + } +} + +static void put_pasid_state_wait(struct pasid_state *pasid_state) +{ + DEFINE_WAIT(wait); + + prepare_to_wait(&pasid_state->wq, &wait, 
TASK_UNINTERRUPTIBLE); + + if (atomic_dec_and_test(&pasid_state->count)) + put_device_state(pasid_state->device_state); + else + schedule(); + + finish_wait(&pasid_state->wq, &wait); + mmput(pasid_state->mm); + free_pasid_state(pasid_state); +} + +static void __unbind_pasid(struct pasid_state *pasid_state) +{ + struct iommu_domain *domain; + + domain = pasid_state->device_state->domain; + + amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); + clear_pasid_state(pasid_state->device_state, pasid_state->pasid); + + /* Make sure no more pending faults are in the queue */ + flush_workqueue(iommu_wq); + + mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); + + put_pasid_state(pasid_state); /* Reference taken in bind() function */ +} + +static void unbind_pasid(struct device_state *dev_state, int pasid) +{ + struct pasid_state *pasid_state; + + pasid_state = get_pasid_state(dev_state, pasid); + if (pasid_state == NULL) + return; + + unlink_pasid_state(pasid_state); + __unbind_pasid(pasid_state); + put_pasid_state_wait(pasid_state); /* Reference taken in this function */ +} + +static void free_pasid_states_level1(struct pasid_state **tbl) +{ + int i; + + for (i = 0; i < 512; ++i) { + if (tbl[i] == NULL) + continue; + + free_page((unsigned long)tbl[i]); + } +} + +static void free_pasid_states_level2(struct pasid_state **tbl) +{ + struct pasid_state **ptr; + int i; + + for (i = 0; i < 512; ++i) { + if (tbl[i] == NULL) + continue; + + ptr = (struct pasid_state **)tbl[i]; + free_pasid_states_level1(ptr); + } +} + +static void free_pasid_states(struct device_state *dev_state) +{ + struct pasid_state *pasid_state; + int i; + + for (i = 0; i < dev_state->max_pasids; ++i) { + pasid_state = get_pasid_state(dev_state, i); + if (pasid_state == NULL) + continue; + + put_pasid_state(pasid_state); + unbind_pasid(dev_state, i); + } + + if (dev_state->pasid_levels == 2) + free_pasid_states_level2(dev_state->states); + else if (dev_state->pasid_levels == 1) + free_pasid_states_level1(dev_state->states); + else if (dev_state->pasid_levels != 0) + BUG(); + + free_page((unsigned long)dev_state->states); +} + +static struct pasid_state *mn_to_state(struct mmu_notifier *mn) +{ + return container_of(mn, struct pasid_state, mn); +} + +static void __mn_flush_page(struct mmu_notifier *mn, + unsigned long address) +{ + struct pasid_state *pasid_state; + struct device_state *dev_state; + + pasid_state = mn_to_state(mn); + dev_state = pasid_state->device_state; + + amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address); +} + +static int mn_clear_flush_young(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + __mn_flush_page(mn, address); + + return 0; +} + +static void mn_change_pte(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, + pte_t pte) +{ + __mn_flush_page(mn, address); +} + +static void mn_invalidate_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + __mn_flush_page(mn, address); +} + +static void mn_invalidate_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct pasid_state *pasid_state; + struct device_state *dev_state; + + pasid_state = mn_to_state(mn); + dev_state = pasid_state->device_state; + + amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid, + __pa(empty_page_table)); +} + +static void mn_invalidate_range_end(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct pasid_state 
*pasid_state; + struct device_state *dev_state; + + pasid_state = mn_to_state(mn); + dev_state = pasid_state->device_state; + + amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid, + __pa(pasid_state->mm->pgd)); +} + +static struct mmu_notifier_ops iommu_mn = { + .clear_flush_young = mn_clear_flush_young, + .change_pte = mn_change_pte, + .invalidate_page = mn_invalidate_page, + .invalidate_range_start = mn_invalidate_range_start, + .invalidate_range_end = mn_invalidate_range_end, +}; + +static void set_pri_tag_status(struct pasid_state *pasid_state, + u16 tag, int status) +{ + unsigned long flags; + + spin_lock_irqsave(&pasid_state->lock, flags); + pasid_state->pri[tag].status = status; + spin_unlock_irqrestore(&pasid_state->lock, flags); +} + +static void finish_pri_tag(struct device_state *dev_state, + struct pasid_state *pasid_state, + u16 tag) +{ + unsigned long flags; + + spin_lock_irqsave(&pasid_state->lock, flags); + if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) && + pasid_state->pri[tag].finish) { + amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid, + pasid_state->pri[tag].status, tag); + pasid_state->pri[tag].finish = false; + pasid_state->pri[tag].status = PPR_SUCCESS; + } + spin_unlock_irqrestore(&pasid_state->lock, flags); +} + +static void do_fault(struct work_struct *work) +{ + struct fault *fault = container_of(work, struct fault, work); + int npages, write; + struct page *page; + + write = !!(fault->flags & PPR_FAULT_WRITE); + + npages = get_user_pages(fault->state->task, fault->state->mm, + fault->address, 1, write, 0, &page, NULL); + + if (npages == 1) { + put_page(page); + } else if (fault->dev_state->inv_ppr_cb) { + int status; + + status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev, + fault->pasid, + fault->address, + fault->flags); + switch (status) { + case AMD_IOMMU_INV_PRI_RSP_SUCCESS: + set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS); + break; + case AMD_IOMMU_INV_PRI_RSP_INVALID: + set_pri_tag_status(fault->state, fault->tag, PPR_INVALID); + break; + case AMD_IOMMU_INV_PRI_RSP_FAIL: + set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE); + break; + default: + BUG(); + } + } else { + set_pri_tag_status(fault->state, fault->tag, PPR_INVALID); + } + + finish_pri_tag(fault->dev_state, fault->state, fault->tag); + + put_pasid_state(fault->state); + + kfree(fault); +} + +static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) +{ + struct amd_iommu_fault *iommu_fault; + struct pasid_state *pasid_state; + struct device_state *dev_state; + unsigned long flags; + struct fault *fault; + bool finish; + u16 tag; + int ret; + + iommu_fault = data; + tag = iommu_fault->tag & 0x1ff; + finish = (iommu_fault->tag >> 9) & 1; + + ret = NOTIFY_DONE; + dev_state = get_device_state(iommu_fault->device_id); + if (dev_state == NULL) + goto out; + + pasid_state = get_pasid_state(dev_state, iommu_fault->pasid); + if (pasid_state == NULL) { + /* We know the device but not the PASID -> send INVALID */ + amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid, + PPR_INVALID, tag); + goto out_drop_state; + } + + spin_lock_irqsave(&pasid_state->lock, flags); + atomic_inc(&pasid_state->pri[tag].inflight); + if (finish) + pasid_state->pri[tag].finish = true; + spin_unlock_irqrestore(&pasid_state->lock, flags); + + fault = kzalloc(sizeof(*fault), GFP_ATOMIC); + if (fault == NULL) { + /* We are OOM - send success and let the device re-fault */ + finish_pri_tag(dev_state, pasid_state, tag); + goto out_drop_state; + } 
+ + fault->dev_state = dev_state; + fault->address = iommu_fault->address; + fault->state = pasid_state; + fault->tag = tag; + fault->finish = finish; + fault->flags = iommu_fault->flags; + INIT_WORK(&fault->work, do_fault); + + queue_work(iommu_wq, &fault->work); + + ret = NOTIFY_OK; + +out_drop_state: + put_device_state(dev_state); + +out: + return ret; +} + +static struct notifier_block ppr_nb = { + .notifier_call = ppr_notifier, +}; + +static int task_exit(struct notifier_block *nb, unsigned long e, void *data) +{ + struct pasid_state *pasid_state; + struct task_struct *task; + + task = data; + + /* + * Using this notifier is a hack - but there is no other choice + * at the moment. What I really want is a sleeping notifier that + * is called when an MM goes down. But such a notifier doesn't + * exist yet. The notifier needs to sleep because it has to make + * sure that the device does not use the PASID and the address + * space anymore before it is destroyed. This includes waiting + * for pending PRI requests to pass the workqueue. The + * MMU-Notifiers would be a good fit, but they use RCU and so + * they are not allowed to sleep. Lets see how we can solve this + * in a more intelligent way in the future. + */ +again: + spin_lock(&ps_lock); + list_for_each_entry(pasid_state, &pasid_state_list, list) { + struct device_state *dev_state; + int pasid; + + if (pasid_state->task != task) + continue; + + /* Drop Lock and unbind */ + spin_unlock(&ps_lock); + + dev_state = pasid_state->device_state; + pasid = pasid_state->pasid; + + unbind_pasid(dev_state, pasid); + + /* Task may be in the list multiple times */ + goto again; + } + spin_unlock(&ps_lock); + + return NOTIFY_OK; +} + +int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, + struct task_struct *task) +{ + struct pasid_state *pasid_state; + struct device_state *dev_state; + u16 devid; + int ret; + + might_sleep(); + + if (!amd_iommu_v2_supported()) + return -ENODEV; + + devid = device_id(pdev); + dev_state = get_device_state(devid); + + if (dev_state == NULL) + return -EINVAL; + + ret = -EINVAL; + if (pasid < 0 || pasid >= dev_state->max_pasids) + goto out; + + ret = -ENOMEM; + pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL); + if (pasid_state == NULL) + goto out; + + atomic_set(&pasid_state->count, 1); + init_waitqueue_head(&pasid_state->wq); + pasid_state->task = task; + pasid_state->mm = get_task_mm(task); + pasid_state->device_state = dev_state; + pasid_state->pasid = pasid; + pasid_state->mn.ops = &iommu_mn; + + if (pasid_state->mm == NULL) + goto out_free; + + mmu_notifier_register(&pasid_state->mn, pasid_state->mm); + + ret = set_pasid_state(dev_state, pasid_state, pasid); + if (ret) + goto out_unregister; + + ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid, + __pa(pasid_state->mm->pgd)); + if (ret) + goto out_clear_state; + + link_pasid_state(pasid_state); + + return 0; + +out_clear_state: + clear_pasid_state(dev_state, pasid); + +out_unregister: + mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm); + +out_free: + free_pasid_state(pasid_state); + +out: + put_device_state(dev_state); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_bind_pasid); + +void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid) +{ + struct device_state *dev_state; + u16 devid; + + might_sleep(); + + if (!amd_iommu_v2_supported()) + return; + + devid = device_id(pdev); + dev_state = get_device_state(devid); + if (dev_state == NULL) + return; + + if (pasid < 0 || pasid >= dev_state->max_pasids) + goto out; + + 
unbind_pasid(dev_state, pasid); + +out: + put_device_state(dev_state); +} +EXPORT_SYMBOL(amd_iommu_unbind_pasid); + +int amd_iommu_init_device(struct pci_dev *pdev, int pasids) +{ + struct device_state *dev_state; + unsigned long flags; + int ret, tmp; + u16 devid; + + might_sleep(); + + if (!amd_iommu_v2_supported()) + return -ENODEV; + + if (pasids <= 0 || pasids > (PASID_MASK + 1)) + return -EINVAL; + + devid = device_id(pdev); + + dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL); + if (dev_state == NULL) + return -ENOMEM; + + spin_lock_init(&dev_state->lock); + init_waitqueue_head(&dev_state->wq); + dev_state->pdev = pdev; + + tmp = pasids; + for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9) + dev_state->pasid_levels += 1; + + atomic_set(&dev_state->count, 1); + dev_state->max_pasids = pasids; + + ret = -ENOMEM; + dev_state->states = (void *)get_zeroed_page(GFP_KERNEL); + if (dev_state->states == NULL) + goto out_free_dev_state; + + dev_state->domain = iommu_domain_alloc(&pci_bus_type); + if (dev_state->domain == NULL) + goto out_free_states; + + amd_iommu_domain_direct_map(dev_state->domain); + + ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids); + if (ret) + goto out_free_domain; + + ret = iommu_attach_device(dev_state->domain, &pdev->dev); + if (ret != 0) + goto out_free_domain; + + spin_lock_irqsave(&state_lock, flags); + + if (state_table[devid] != NULL) { + spin_unlock_irqrestore(&state_lock, flags); + ret = -EBUSY; + goto out_free_domain; + } + + state_table[devid] = dev_state; + + spin_unlock_irqrestore(&state_lock, flags); + + return 0; + +out_free_domain: + iommu_domain_free(dev_state->domain); + +out_free_states: + free_page((unsigned long)dev_state->states); + +out_free_dev_state: + kfree(dev_state); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_init_device); + +void amd_iommu_free_device(struct pci_dev *pdev) +{ + struct device_state *dev_state; + unsigned long flags; + u16 devid; + + if (!amd_iommu_v2_supported()) + return; + + devid = device_id(pdev); + + spin_lock_irqsave(&state_lock, flags); + + dev_state = state_table[devid]; + if (dev_state == NULL) { + spin_unlock_irqrestore(&state_lock, flags); + return; + } + + state_table[devid] = NULL; + + spin_unlock_irqrestore(&state_lock, flags); + + /* Get rid of any remaining pasid states */ + free_pasid_states(dev_state); + + put_device_state_wait(dev_state); +} +EXPORT_SYMBOL(amd_iommu_free_device); + +int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, + amd_iommu_invalid_ppr_cb cb) +{ + struct device_state *dev_state; + unsigned long flags; + u16 devid; + int ret; + + if (!amd_iommu_v2_supported()) + return -ENODEV; + + devid = device_id(pdev); + + spin_lock_irqsave(&state_lock, flags); + + ret = -EINVAL; + dev_state = state_table[devid]; + if (dev_state == NULL) + goto out_unlock; + + dev_state->inv_ppr_cb = cb; + + ret = 0; + +out_unlock: + spin_unlock_irqrestore(&state_lock, flags); + + return ret; +} +EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb); + +static int __init amd_iommu_v2_init(void) +{ + size_t state_table_size; + int ret; + + pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>"); + + spin_lock_init(&state_lock); + + state_table_size = MAX_DEVICES * sizeof(struct device_state *); + state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(state_table_size)); + if (state_table == NULL) + return -ENOMEM; + + ret = -ENOMEM; + iommu_wq = create_workqueue("amd_iommu_v2"); + if (iommu_wq == NULL) + goto out_free; + + ret = -ENOMEM; + empty_page_table = 
(u64 *)get_zeroed_page(GFP_KERNEL); + if (empty_page_table == NULL) + goto out_destroy_wq; + + amd_iommu_register_ppr_notifier(&ppr_nb); + profile_event_register(PROFILE_TASK_EXIT, &profile_nb); + + return 0; + +out_destroy_wq: + destroy_workqueue(iommu_wq); + +out_free: + free_pages((unsigned long)state_table, get_order(state_table_size)); + + return ret; +} + +static void __exit amd_iommu_v2_exit(void) +{ + struct device_state *dev_state; + size_t state_table_size; + int i; + + profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb); + amd_iommu_unregister_ppr_notifier(&ppr_nb); + + flush_workqueue(iommu_wq); + + /* + * The loop below might call flush_workqueue(), so call + * destroy_workqueue() after it + */ + for (i = 0; i < MAX_DEVICES; ++i) { + dev_state = get_device_state(i); + + if (dev_state == NULL) + continue; + + WARN_ON_ONCE(1); + + put_device_state(dev_state); + amd_iommu_free_device(dev_state->pdev); + } + + destroy_workqueue(iommu_wq); + + state_table_size = MAX_DEVICES * sizeof(struct device_state *); + free_pages((unsigned long)state_table, get_order(state_table_size)); + + free_page((unsigned long)empty_page_table); +} + +module_init(amd_iommu_v2_init); +module_exit(amd_iommu_v2_exit); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 4c780efff169..c181883c2f9a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3542,7 +3542,7 @@ found: return 0; } -int dmar_parse_rmrr_atsr_dev(void) +int __init dmar_parse_rmrr_atsr_dev(void) { struct dmar_rmrr_unit *rmrr, *rmrr_n; struct dmar_atsr_unit *atsr, *atsr_n; diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c index 07c9f189f314..6777ca049471 100644 --- a/drivers/iommu/intr_remapping.c +++ b/drivers/iommu/intr_remapping.c @@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void) return ir_supported; } -int ir_dev_scope_init(void) +int __init ir_dev_scope_init(void) { if (!intr_remapping_enabled) return 0; diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 9c192e79f806..288da5c1499d 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include <linux/module.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c index 0b7b14cb030b..6edc4ceba197 100644 --- a/drivers/iommu/omap-iovmm.c +++ b/drivers/iommu/omap-iovmm.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/vmalloc.h> diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 33ec9e467772..9021182c4b76 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg) case IIOCDOCFINT: if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) return (-EINVAL); /* invalid driver */ + if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) == + sizeof(dioctl.cf_ctrl.msn)) + return -EINVAL; + if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) == + sizeof(dioctl.cf_ctrl.fwd_nr)) + return -EINVAL; if ((i = cf_command(dioctl.cf_ctrl.drvid, (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 
0 : 2, dioctl.cf_ctrl.cfproc, diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 1f73d7f7e024..2339d7396b9e 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) char *c, *e; + if (strnlen(cfg->drvid, sizeof(cfg->drvid)) == + sizeof(cfg->drvid)) + return -EINVAL; drvidx = -1; chidx = -1; strcpy(drvid, cfg->drvid); diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 661b692573e7..6d5628bb0601 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -270,11 +270,8 @@ void led_blink_set(struct led_classdev *led_cdev, del_timer_sync(&led_cdev->blink_timer); if (led_cdev->blink_set && - !led_cdev->blink_set(led_cdev, delay_on, delay_off)) { - led_cdev->blink_delay_on = *delay_on; - led_cdev->blink_delay_off = *delay_off; + !led_cdev->blink_set(led_cdev, delay_on, delay_off)) return; - } /* blink with 1 Hz as default if nothing specified */ if (!*delay_on && !*delay_off) diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 817f37a875c9..c9570fcf1cce 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c @@ -159,7 +159,7 @@ int macii_init(void) err = macii_init_via(); if (err) goto out; - err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB", + err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB", macii_interrupt); if (err) goto out; diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c index 9ab5b0c34f0d..34d02a91b29f 100644 --- a/drivers/macintosh/via-maciisi.c +++ b/drivers/macintosh/via-maciisi.c @@ -122,8 +122,8 @@ maciisi_init(void) return err; } - if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST, - "ADB", maciisi_interrupt)) { + if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB", + maciisi_interrupt)) { printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB); return -EAGAIN; } diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7878712721bf..b6907118283a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap) */ int i; + spin_lock_irq(&bitmap->lock); for (i = 0; i < bitmap->file_pages; i++) set_page_attr(bitmap, bitmap->filemap[i], BITMAP_PAGE_NEEDWRITE); bitmap->allclean = 0; + spin_unlock_irq(&bitmap->lock); } static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc) @@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) for (chunk = s; chunk <= e; chunk++) { sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap); bitmap_set_memory_bits(bitmap, sec, 1); + spin_lock_irq(&bitmap->lock); bitmap_file_set_bit(bitmap, sec); + spin_unlock_irq(&bitmap->lock); if (sec < bitmap->mddev->recovery_cp) /* We are asserting that the array is dirty, * so move the recovery_cp address back so diff --git a/drivers/md/md.c b/drivers/md/md.c index 84acfe7d10e4..ee981737edfc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev) mddev->ctime == 0 && !mddev->hold_active) { /* Array is not configured at all, and not held active, * so destroy it */ - list_del(&mddev->all_mddevs); + list_del_init(&mddev->all_mddevs); bs = mddev->bio_set; mddev->bio_set = NULL; if (mddev->gendisk) { @@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page) sep = ","; } if (test_bit(Blocked, &rdev->flags) || - 
rdev->badblocks.unacked_exist) { + (rdev->badblocks.unacked_exist + && !test_bit(Faulty, &rdev->flags))) { len += sprintf(page+len, "%sblocked", sep); sep = ","; } @@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) if (err) return err; else { + if (mddev->hold_active == UNTIL_IOCTL) + mddev->hold_active = 0; sysfs_notify_dirent_safe(mddev->sysfs_state); return len; } @@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) if (!entry->show) return -EIO; + spin_lock(&all_mddevs_lock); + if (list_empty(&mddev->all_mddevs)) { + spin_unlock(&all_mddevs_lock); + return -EBUSY; + } + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + rv = mddev_lock(mddev); if (!rv) { rv = entry->show(mddev, page); mddev_unlock(mddev); } + mddev_put(mddev); return rv; } @@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; + spin_lock(&all_mddevs_lock); + if (list_empty(&mddev->all_mddevs)) { + spin_unlock(&all_mddevs_lock); + return -EBUSY; + } + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); rv = mddev_lock(mddev); - if (mddev->hold_active == UNTIL_IOCTL) - mddev->hold_active = 0; if (!rv) { rv = entry->store(mddev, page, length); mddev_unlock(mddev); } + mddev_put(mddev); return rv; } @@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, s + rdev->data_offset, sectors, acknowledged); if (rv) { /* Make sure they get written out promptly */ + sysfs_notify_dirent_safe(rdev->sysfs_state); set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags); md_wakeup_thread(rdev->mddev->thread); } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 472aedfb07cf..31670f8d6b65 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) if (dev->written) s->written++; rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = NULL; if (rdev) { is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, &first_bad, &bad_sectors); @@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } } else if (test_bit(In_sync, &rdev->flags)) set_bit(R5_Insync, &dev->flags); - else if (!test_bit(Faulty, &rdev->flags)) { + else { /* in sync if before recovery_offset */ if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) set_bit(R5_Insync, &dev->flags); } - if (test_bit(R5_WriteError, &dev->flags)) { + if (rdev && test_bit(R5_WriteError, &dev->flags)) { clear_bit(R5_Insync, &dev->flags); if (!test_bit(Faulty, &rdev->flags)) { s->handle_bad_blocks = 1; @@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) } else clear_bit(R5_WriteError, &dev->flags); } - if (test_bit(R5_MadeGood, &dev->flags)) { + if (rdev && test_bit(R5_MadeGood, &dev->flags)) { if (!test_bit(Faulty, &rdev->flags)) { s->handle_bad_blocks = 1; atomic_inc(&rdev->nr_pending); @@ -3110,7 +3112,7 @@ static void handle_stripe(struct stripe_head *sh) struct r5dev *pdev, *qdev; clear_bit(STRIPE_HANDLE, &sh->state); - if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { + if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { /* already being handled, ensure it gets handled * again when current action finishes */ set_bit(STRIPE_HANDLE, &sh->state); @@ -3159,10 +3161,14 @@ static void handle_stripe(struct stripe_head *sh) /* check if the array has lost 
more than max_degraded devices and, * if so, some requests might need to be failed. */ - if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) - handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); - if (s.failed > conf->max_degraded && s.syncing) - handle_failed_sync(conf, sh, &s); + if (s.failed > conf->max_degraded) { + sh->check_state = 0; + sh->reconstruct_state = 0; + if (s.to_read+s.to_write+s.written) + handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); + if (s.syncing) + handle_failed_sync(conf, sh, &s); + } /* * might be able to return some write requests if the parity blocks @@ -3371,7 +3377,7 @@ finish: return_io(s.return_bi); - clear_bit(STRIPE_ACTIVE, &sh->state); + clear_bit_unlock(STRIPE_ACTIVE, &sh->state); } static void raid5_activate_delayed(struct r5conf *conf) diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c index 2e8c288258a9..34434557ef65 100644 --- a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c +++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c @@ -398,7 +398,6 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state, u8 i2c_r_data[24]; u8 i = 0; u8 fifo_status = 0; - int ret; int status = 0; mxl_i2c("read %d bytes", count); @@ -418,7 +417,7 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state, i2c_w_data[4+(i*3)] = 0x00; } - ret = mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); + mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); /* Check for I2C NACK status */ if (mxl111sf_i2c_check_status(state) == 1) { diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c index 91dc1fc2825b..b741b3a7a325 100644 --- a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c +++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c @@ -296,8 +296,7 @@ int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff) goto fail; ret = mxl111sf_write_reg(state, 0x00, 0x00); - if (mxl_fail(ret)) - goto fail; + mxl_fail(ret); fail: return ret; } @@ -328,11 +327,13 @@ int mxl111sf_idac_config(struct mxl111sf_state *state, /* set hysteresis value reg: 0x0B<5:0> */ ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG, (hysteresis_value & 0x3F)); + mxl_fail(ret); } ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val); + mxl_fail(ret); - return val; + return ret; } /* diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c index 2446736b7871..0df7f2a41814 100644 --- a/drivers/media/video/s5k6aa.c +++ b/drivers/media/video/s5k6aa.c @@ -19,6 +19,7 @@ #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/media.h> +#include <linux/module.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c index 725634d9736d..844a4d7797bc 100644 --- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c @@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->version = KERNEL_VERSION(1, 0, 0); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT - | V4L2_CAP_STREAMING; + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE | + V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING; return 0; } diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c index ecef127dbc66..1e8cdb77d4b8 100644 --- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c +++ 
b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c @@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->version = KERNEL_VERSION(1, 0, 0); - cap->capabilities = V4L2_CAP_VIDEO_CAPTURE - | V4L2_CAP_VIDEO_OUTPUT + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE + | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING; return 0; } diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c index 10c2364f3e8a..254d32688843 100644 --- a/drivers/media/video/uvc/uvc_ctrl.c +++ b/drivers/media/video/uvc/uvc_ctrl.c @@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain, menu_info = &mapping->menu_info[query_menu->index]; - if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { + if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && + (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) { s32 bitmap; if (!ctrl->cached) { @@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain, /* Valid menu indices are reported by the GET_RES request for * UVC controls that support it. */ - if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { + if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && + (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) { if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c index f17f92b86a30..0f415dade05a 100644 --- a/drivers/media/video/v4l2-ctrls.c +++ b/drivers/media/video/v4l2-ctrls.c @@ -821,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) fill_event(&ev, ctrl, changes); list_for_each_entry(sev, &ctrl->ev_subs, node) - if (sev->fh && (sev->fh != fh || - (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))) + if (sev->fh != fh || + (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)) v4l2_event_queue_fh(sev->fh, &ev); } @@ -947,6 +947,7 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, if (ctrl->cluster[0]->has_volatiles) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; } + fh = NULL; } if (changed || update_inactive) { /* If a control was changed that was not one of the controls diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c index 46037f225529..c26ad9637143 100644 --- a/drivers/media/video/v4l2-event.c +++ b/drivers/media/video/v4l2-event.c @@ -216,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, unsigned long flags; unsigned i; + if (sub->type == V4L2_EVENT_ALL) + return -EINVAL; + if (elems < 1) elems = 1; if (sub->type == V4L2_EVENT_CTRL) { @@ -283,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, { struct v4l2_subscribed_event *sev; unsigned long flags; + int i; if (sub->type == V4L2_EVENT_ALL) { v4l2_event_unsubscribe_all(fh); @@ -293,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, sev = v4l2_event_subscribed(fh, sub->type, sub->id); if (sev != NULL) { + /* Remove any pending events for this subscription */ + for (i = 0; i < sev->in_use; i++) { + list_del(&sev->events[sev_pos(sev, i)].list); + fh->navailable--; + } list_del(&sev->list); - sev->fh = NULL; } spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c index 979e544388cb..95a3f5e82aef 100644 --- a/drivers/media/video/videobuf2-core.c +++ b/drivers/media/video/videobuf2-core.c @@ -131,6 +131,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n) continue; for (plane = 
0; plane < vb->num_planes; ++plane) { + vb->v4l2_planes[plane].length = q->plane_sizes[plane]; vb->v4l2_planes[plane].m.mem_offset = off; dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", @@ -264,6 +265,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) q->num_buffers -= buffers; if (!q->num_buffers) q->memory = 0; + INIT_LIST_HEAD(&q->queued_list); } /** @@ -296,14 +298,14 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) { unsigned int plane; for (plane = 0; plane < vb->num_planes; ++plane) { + void *mem_priv = vb->planes[plane].mem_priv; /* * If num_users() has not been provided, call_memop * will return 0, apparently nobody cares about this * case anyway. If num_users() returns more than 1, * we are not the only user of the plane's memory. */ - if (call_memop(q, plane, num_users, - vb->planes[plane].mem_priv) > 1) + if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1) return true; } return false; diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c index 4175544b491b..ec10629a0b0b 100644 --- a/drivers/mfd/ab5500-core.c +++ b/drivers/mfd/ab5500-core.c @@ -13,6 +13,7 @@ * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. */ +#include <linux/module.h> #include <linux/mutex.h> #include <linux/err.h> #include <linux/platform_device.h> diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c index 6be1fe6b5f9a..43c0ebb81956 100644 --- a/drivers/mfd/ab5500-debugfs.c +++ b/drivers/mfd/ab5500-debugfs.c @@ -4,6 +4,7 @@ * Debugfs support for the AB5500 MFD driver */ +#include <linux/export.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/mfd/ab5500/ab5500.h> diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index d593878d66d0..5664696f2d3a 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -472,7 +472,7 @@ config BMP085 module will be called bmp085. config PCH_PHUB - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB" + tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" depends on PCI help This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of @@ -480,12 +480,13 @@ config PCH_PHUB processor. The Topcliff has MAC address and Option ROM data in SROM. This driver can access MAC address and Option ROM data in SROM. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7213 and ML7223. - ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is - for MP(Media Phone) use. - ML7213/ML7223 is companion chip for Intel Atom E6xx series. - ML7213/ML7223 is completely compatible for Intel EG20T PCH. + This driver also can be used for LAPIS Semiconductor's IOH, + ML7213/ML7223/ML7831. + ML7213 which is for IVI(In-Vehicle Infotainment) use. + ML7223 IOH is for MP(Media Phone) use. + ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. To compile this driver as a module, choose M here: the module will be called pch_phub. 
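The v4l2-event changes a few hunks above tighten the event API: VIDIOC_SUBSCRIBE_EVENT now rejects the V4L2_EVENT_ALL wildcard, and unsubscribing drops any events still queued for that subscription instead of leaving them behind. A minimal user-space sketch of the affected calls, with an assumed device node, for orientation only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_event_subscription sub = {
                .type = V4L2_EVENT_CTRL,        /* V4L2_EVENT_ALL is no longer accepted here */
                .id   = V4L2_CID_BRIGHTNESS,
        };
        struct v4l2_event ev;
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */

        if (fd < 0)
                return 1;
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
                return 1;

        /* Blocks until a control event arrives; poll()/select() also work */
        if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
                printf("event type %u, sequence %u\n", ev.type, ev.sequence);

        /*
         * The wildcard is still valid for unsubscribe; after the fix above,
         * no stale events from this subscription can be dequeued later.
         */
        sub.type = V4L2_EVENT_ALL;
        ioctl(fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub);
        return 0;
}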
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h index a662f5987b68..82b2cb77ae19 100644 --- a/drivers/misc/ad525x_dpot.h +++ b/drivers/misc/ad525x_dpot.h @@ -100,7 +100,7 @@ enum dpot_devid { AD5293_ID = DPOT_CONF(F_RDACS_RW | F_SPI_16BIT, BRDAC0, 10, 27), AD7376_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, BRDAC0, 7, 28), - AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_8BIT, + AD8400_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, BRDAC0, 8, 29), AD8402_ID = DPOT_CONF(F_RDACS_WONLY | F_AD_APPDATA | F_SPI_16BIT, BRDAC0 | BRDAC1, 8, 30), diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c index 7ce6065dc20e..eb5cd28bc6d8 100644 --- a/drivers/misc/carma/carma-fpga-program.c +++ b/drivers/misc/carma/carma-fpga-program.c @@ -945,8 +945,7 @@ static int fpga_of_remove(struct platform_device *op) /* CTL-CPLD Version Register */ #define CTL_CPLD_VERSION 0x2000 -static int fpga_of_probe(struct platform_device *op, - const struct of_device_id *match) +static int fpga_of_probe(struct platform_device *op) { struct device_node *of_node = op->dev.of_node; struct device *this_device; @@ -1107,7 +1106,7 @@ static struct of_device_id fpga_of_match[] = { {}, }; -static struct of_platform_driver fpga_of_driver = { +static struct platform_driver fpga_of_driver = { .probe = fpga_of_probe, .remove = fpga_of_remove, .driver = { @@ -1124,12 +1123,12 @@ static struct of_platform_driver fpga_of_driver = { static int __init fpga_init(void) { led_trigger_register_simple("fpga", &ledtrig_fpga); - return of_register_platform_driver(&fpga_of_driver); + return platform_driver_register(&fpga_of_driver); } static void __exit fpga_exit(void) { - of_unregister_platform_driver(&fpga_of_driver); + platform_driver_unregister(&fpga_of_driver); led_trigger_unregister_simple(ledtrig_fpga); } diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c index 3965821fef17..14e974b2a781 100644 --- a/drivers/misc/carma/carma-fpga.c +++ b/drivers/misc/carma/carma-fpga.c @@ -1249,8 +1249,7 @@ static bool dma_filter(struct dma_chan *chan, void *data) return true; } -static int data_of_probe(struct platform_device *op, - const struct of_device_id *match) +static int data_of_probe(struct platform_device *op) { struct device_node *of_node = op->dev.of_node; struct device *this_device; @@ -1401,7 +1400,7 @@ static struct of_device_id data_of_match[] = { {}, }; -static struct of_platform_driver data_of_driver = { +static struct platform_driver data_of_driver = { .probe = data_of_probe, .remove = data_of_remove, .driver = { @@ -1417,12 +1416,12 @@ static struct of_platform_driver data_of_driver = { static int __init data_init(void) { - return of_register_platform_driver(&data_of_driver); + return platform_driver_register(&data_of_driver); } static void __exit data_exit(void) { - of_unregister_platform_driver(&data_of_driver); + platform_driver_unregister(&data_of_driver); } MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index 26cf12ca7f50..701edf658970 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig @@ -85,7 +85,7 @@ config EEPROM_93XX46 config EEPROM_DIGSY_MTC_CFG bool "DigsyMTC display configuration EEPROMs device" - depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO + depends on GPIO_MPC5200 && SPI_GPIO help This option enables access to display configuration EEPROMs on digsy_mtc board. 
You have to additionally select Microwire diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index dee33addcaeb..10fc4785dba7 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. + * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -41,10 +41,10 @@ #define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset (Intel EG20T PCH)*/ #define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address - offset(OKI SEMICONDUCTOR ML7213) + offset(LAPIS Semicon ML7213) */ #define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address - offset(OKI SEMICONDUCTOR ML7223) + offset(LAPIS Semicon ML7223) */ /* MAX number of INT_REDUCE_CONTROL registers */ @@ -73,6 +73,9 @@ #define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */ #define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */ +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801 + /* SROM ACCESS Macro */ #define PCH_WORD_ADDR_MASK (~((1 << 2) - 1)) @@ -115,6 +118,7 @@ * @pch_mac_start_address: MAC address area start address * @pch_opt_rom_start_address: Option ROM start address * @ioh_type: Save IOH type + * @pdev: pointer to pci device struct */ struct pch_phub_reg { u32 phub_id_reg; @@ -136,6 +140,7 @@ struct pch_phub_reg { u32 pch_mac_start_address; u32 pch_opt_rom_start_address; int ioh_type; + struct pci_dev *pdev; }; /* SROM SPEC for MAC address assignment offset */ @@ -471,7 +476,7 @@ static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data) int retval; int i; - if (chip->ioh_type == 1) /* EG20T */ + if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/ retval = pch_phub_gbe_serial_rom_conf(chip); else /* ML7223 */ retval = pch_phub_gbe_serial_rom_conf_mp(chip); @@ -498,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, unsigned int orom_size; int ret; int err; + ssize_t rom_size; struct pch_phub_reg *chip = dev_get_drvdata(container_of(kobj, struct device, kobj)); @@ -509,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, } /* Get Rom signature */ + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + goto exrom_map_err; + pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, (unsigned char *)&rom_signature); rom_signature &= 0xff; @@ -539,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, goto return_err; } return_ok: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); mutex_unlock(&pch_phub_mutex); return addr_offset; return_err: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); +exrom_map_err: mutex_unlock(&pch_phub_mutex); return_err_nomutex: return err; @@ -555,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, int err; unsigned int addr_offset; int ret; + ssize_t rom_size; struct pch_phub_reg *chip = dev_get_drvdata(container_of(kobj, struct device, kobj)); @@ -571,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, goto return_ok; } + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) { + err = -ENOMEM; + goto exrom_map_err; + } + for 
(addr_offset = 0; addr_offset < count; addr_offset++) { if (PCH_PHUB_OROM_SIZE < off + addr_offset) goto return_ok; @@ -585,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, } return_ok: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); mutex_unlock(&pch_phub_mutex); return addr_offset; return_err: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + +exrom_map_err: mutex_unlock(&pch_phub_mutex); return err; } @@ -598,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr, { u8 mac[8]; struct pch_phub_reg *chip = dev_get_drvdata(dev); + ssize_t rom_size; + + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + return -ENOMEM; pch_phub_read_gbe_mac_addr(chip, mac); + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); return sprintf(buf, "%pM\n", mac); } @@ -608,6 +638,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u8 mac[6]; + ssize_t rom_size; struct pch_phub_reg *chip = dev_get_drvdata(dev); if (count != 18) @@ -617,7 +648,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], (u32 *)&mac[4], (u32 *)&mac[5]); + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + return -ENOMEM; + pch_phub_write_gbe_mac_addr(chip, mac); + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); return count; } @@ -640,7 +676,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, int retval; int ret; - ssize_t rom_size; struct pch_phub_reg *chip; chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); @@ -677,19 +712,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, "in pch_phub_base_address variable is %p\n", __func__, chip->pch_phub_base_address); - if (id->driver_data != 3) { - chip->pch_phub_extrom_base_address =\ - pci_map_rom(pdev, &rom_size); - if (chip->pch_phub_extrom_base_address == 0) { - dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__); - ret = -ENOMEM; - goto err_pci_map; - } - dev_dbg(&pdev->dev, "%s : " - "pci_map_rom SUCCESS and value in " - "pch_phub_extrom_base_address variable is %p\n", - __func__, chip->pch_phub_extrom_base_address); - } + chip->pdev = pdev; /* Save pci device struct */ if (id->driver_data == 1) { /* EG20T PCH */ const char *board_name; @@ -763,6 +786,22 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, chip->pch_opt_rom_start_address =\ PCH_PHUB_ROM_START_ADDR_ML7223; chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; + } else if (id->driver_data == 5) { /* ML7831 */ + retval = sysfs_create_file(&pdev->dev.kobj, + &dev_attr_pch_mac.attr); + if (retval) + goto err_sysfs_create; + + retval = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr); + if (retval) + goto exit_bin_attr; + + /* set the prefetch value */ + iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14); + /* set the interrupt delay value */ + iowrite32(0x25, chip->pch_phub_base_address + 0x44); + chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T; + chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T; } chip->ioh_type = id->driver_data; @@ -773,8 +812,6 @@ exit_bin_attr: sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); err_sysfs_create: - pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address); -err_pci_map:
pci_iounmap(pdev, chip->pch_phub_base_address); err_pci_iomap: pci_release_regions(pdev); @@ -792,7 +829,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev) sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); - pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address); pci_iounmap(pdev, chip->pch_phub_base_address); pci_release_regions(pdev); pci_disable_device(pdev); @@ -847,6 +883,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, }, { } }; MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id); @@ -873,5 +910,5 @@ static void __exit pch_phub_pci_exit(void) module_init(pch_phub_pci_init); module_exit(pch_phub_pci_exit); -MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7223) PHUB"); +MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB"); MODULE_LICENSE("GPL"); diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c index cfbddbef11de..43d073bc1d9c 100644 --- a/drivers/misc/spear13xx_pcie_gadget.c +++ b/drivers/misc/spear13xx_pcie_gadget.c @@ -903,6 +903,6 @@ static void __exit spear_pcie_gadget_exit(void) } module_exit(spear_pcie_gadget_exit); -MODULE_ALIAS("pcie-gadget-spear"); +MODULE_ALIAS("platform:pcie-gadget-spear"); MODULE_AUTHOR("Pratyush Anand"); MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index ae57769ba50d..4b976f00ea85 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -32,6 +32,7 @@ /* VENDOR SPEC register */ #define SDHCI_VENDOR_SPEC 0xC0 #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 +#define SDHCI_WTMK_LVL 0x44 #define SDHCI_MIX_CTRL 0x48 /* @@ -476,6 +477,13 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) if (is_imx53_esdhc(imx_data)) imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; + /* + * The imx6q ROM code will change the default watermark level setting + * to something insane. Change it back here. 
+ */ + if (is_imx6q_usdhc(imx_data)) + writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL); + boarddata = &imx_data->boarddata; if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { if (!host->mmc->parent->platform_data) { diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c index 608967fe74c6..736ca10ca9f1 100644 --- a/drivers/mtd/maps/bcm963xx-flash.c +++ b/drivers/mtd/maps/bcm963xx-flash.c @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/module.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 583f66cd5bbd..654a5e94e0e7 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -245,6 +245,8 @@ source "drivers/net/ethernet/Kconfig" source "drivers/net/fddi/Kconfig" +source "drivers/net/hippi/Kconfig" + config NET_SB1000 tristate "General Instruments Surfboard 1000" depends on PNP diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig index a73d9dc80ff6..84fb6349a59a 100644 --- a/drivers/net/arcnet/Kconfig +++ b/drivers/net/arcnet/Kconfig @@ -4,7 +4,7 @@ menuconfig ARCNET depends on NETDEVICES && (ISA || PCI || PCMCIA) - bool "ARCnet support" + tristate "ARCnet support" ---help--- If you have a network card of this type, say Y and check out the (arguably) beautiful poetry in diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b0c577256487..7f8756825b8a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2553,30 +2553,6 @@ re_arm: } } -static __be32 bond_glean_dev_ip(struct net_device *dev) -{ - struct in_device *idev; - struct in_ifaddr *ifa; - __be32 addr = 0; - - if (!dev) - return 0; - - rcu_read_lock(); - idev = __in_dev_get_rcu(dev); - if (!idev) - goto out; - - ifa = idev->ifa_list; - if (!ifa) - goto out; - - addr = ifa->ifa_local; -out: - rcu_read_unlock(); - return addr; -} - static int bond_has_this_ip(struct bonding *bond, __be32 ip) { struct vlan_entry *vlan; @@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, struct bonding *bond; struct vlan_entry *vlan; + /* we only care about primary address */ + if(ifa->ifa_flags & IFA_F_SECONDARY) + return NOTIFY_DONE; + list_for_each_entry(bond, &bn->dev_list, bond_list) { if (bond->dev == event_dev) { switch (event) { @@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, bond->master_ip = ifa->ifa_local; return NOTIFY_OK; case NETDEV_DOWN: - bond->master_ip = bond_glean_dev_ip(bond->dev); + bond->master_ip = 0; return NOTIFY_OK; default: return NOTIFY_DONE; @@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event, vlan->vlan_ip = ifa->ifa_local; return NOTIFY_OK; case NETDEV_DOWN: - vlan->vlan_ip = - bond_glean_dev_ip(vlan_dev); + vlan->vlan_ip = 0; return NOTIFY_OK; default: return NOTIFY_DONE; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 5a20804fdece..4ef7e2fd9fe6 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -319,6 +319,13 @@ static ssize_t bonding_store_mode(struct device *d, goto out; } + if (bond->slave_cnt > 0) { + pr_err("unable to update mode of %s because it has slaves.\n", + bond->dev->name); + ret = -EPERM; + goto out; + } + new_value = bond_parse_parm(buf, bond_mode_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid mode 
value %.*s.\n", diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 905bce0b3a43..2c7f5036f570 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -20,7 +20,6 @@ */ #include <linux/kernel.h> -#include <linux/version.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 4cf835dbc122..3fb66d09ece5 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp) skb->len, DMA_TO_DEVICE); rp->skb = NULL; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); } bp->tx_cons = cons; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index bce203fa4b9e..882f48f0a03c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, return 0; } + +static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy, + struct link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + u16 temp; + + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL1); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= 0xff00; + + DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode); + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + temp |= 0x00ee; + break; + case LED_MODE_OPER: + temp |= 0x0001; + break; + case LED_MODE_ON: + temp |= 0x00ff; + break; + default: + break; + } + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + return; +} + + static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, struct link_params *params) { @@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = { .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, - .set_link_led = (set_link_led_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, .phy_specific_func = (phy_specific_func_t)NULL }; /*****************************************************************/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6486ab8c8fc8..2f6361e949f0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10548,33 +10548,38 @@ do { \ int bnx2x_init_firmware(struct bnx2x *bp) { - const char *fw_file_name; struct bnx2x_fw_file_hdr *fw_hdr; int rc; - if (CHIP_IS_E1(bp)) - fw_file_name = FW_FILE_NAME_E1; - else if (CHIP_IS_E1H(bp)) - fw_file_name = FW_FILE_NAME_E1H; - else if (!CHIP_IS_E1x(bp)) - fw_file_name = FW_FILE_NAME_E2; - else { - BNX2X_ERR("Unsupported chip revision\n"); - return -EINVAL; - } - BNX2X_DEV_INFO("Loading %s\n", fw_file_name); + if (!bp->firmware) { + const char *fw_file_name; - rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); - if (rc) { - BNX2X_ERR("Can't load firmware file %s\n", fw_file_name); - goto request_firmware_exit; - } + if (CHIP_IS_E1(bp)) + fw_file_name = FW_FILE_NAME_E1; + else if (CHIP_IS_E1H(bp)) + fw_file_name = FW_FILE_NAME_E1H; + else if (!CHIP_IS_E1x(bp)) + fw_file_name = FW_FILE_NAME_E2; + else { + BNX2X_ERR("Unsupported chip revision\n"); + return -EINVAL; + } + 
BNX2X_DEV_INFO("Loading %s\n", fw_file_name); - rc = bnx2x_check_firmware(bp); - if (rc) { - BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); - goto request_firmware_exit; + rc = request_firmware(&bp->firmware, fw_file_name, + &bp->pdev->dev); + if (rc) { + BNX2X_ERR("Can't load firmware file %s\n", + fw_file_name); + goto request_firmware_exit; + } + + rc = bnx2x_check_firmware(bp); + if (rc) { + BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); + goto request_firmware_exit; + } } fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data; @@ -10630,6 +10635,7 @@ static void bnx2x_release_firmware(struct bnx2x *bp) kfree(bp->init_ops); kfree(bp->init_data); release_firmware(bp->firmware); + bp->firmware = NULL; } @@ -10925,6 +10931,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) if (bp->doorbells) iounmap(bp->doorbells); + bnx2x_release_firmware(bp); + bnx2x_free_mem_bp(bp); free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index fc7bd0f23c0b..e58073ef33b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -6990,6 +6990,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_REG_INTR_MASK 0x1b #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) #define MDIO_REG_GPHY_SHADOW 0x1c +#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10) #define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) #define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 0440425c83d6..14517691f8db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -5380,7 +5380,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, rc = drv->init_fw(bp); if (rc) { BNX2X_ERR("Error loading firmware\n"); - goto fw_init_err; + goto init_err; } /* Handle the beginning of COMMON_XXX pases separatelly... */ @@ -5388,25 +5388,25 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: rc = bnx2x_func_init_cmn_chip(bp, drv); if (rc) - goto init_hw_err; + goto init_err; break; case FW_MSG_CODE_DRV_LOAD_COMMON: rc = bnx2x_func_init_cmn(bp, drv); if (rc) - goto init_hw_err; + goto init_err; break; case FW_MSG_CODE_DRV_LOAD_PORT: rc = bnx2x_func_init_port(bp, drv); if (rc) - goto init_hw_err; + goto init_err; break; case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = bnx2x_func_init_func(bp, drv); if (rc) - goto init_hw_err; + goto init_err; break; default: @@ -5414,10 +5414,7 @@ static int bnx2x_func_hw_init(struct bnx2x *bp, rc = -EINVAL; } -init_hw_err: - drv->release_fw(bp); - -fw_init_err: +init_err: drv->gunzip_end(bp); /* In case of success, complete the comand immediatelly: no ramrods diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 98849a1fc749..b48378a41e49 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -7,6 +7,7 @@ config HAVE_NET_MACB config NET_ATMEL bool "Atmel devices" + default y depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 438f4580bf66..2a22f5256353 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) if (!dm->wake_state) irq_set_irq_wake(dm->irq_wake, 1); - else if (dm->wake_state & !opts) + else if (dm->wake_state && !opts) irq_set_irq_wake(dm->irq_wake, 0); } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index c520cfd3b298..5272f9d4dda9 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -24,6 +24,7 @@ config FEC bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ ARCH_MXC || ARCH_MXS) + default ARCH_MXC || ARCH_MXS if ARM select PHYLIB ---help--- Say Y here if you want to use the built-in 10/100 Fast ethernet diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h index 410d6a1984ed..6650068c996c 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea.h +++ b/drivers/net/ethernet/ibm/ehea/ehea.h @@ -61,9 +61,9 @@ #ifdef EHEA_SMALL_QUEUES #define EHEA_MAX_CQE_COUNT 1023 #define EHEA_DEF_ENTRIES_SQ 1023 -#define EHEA_DEF_ENTRIES_RQ1 4095 +#define EHEA_DEF_ENTRIES_RQ1 1023 #define EHEA_DEF_ENTRIES_RQ2 1023 -#define EHEA_DEF_ENTRIES_RQ3 1023 +#define EHEA_DEF_ENTRIES_RQ3 511 #else #define EHEA_MAX_CQE_COUNT 4080 #define EHEA_DEF_ENTRIES_SQ 4080 diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 37b70f7052b6..bfeccbfde236 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work) out_herr: free_page((unsigned long)cb2); resched: - schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); + schedule_delayed_work(&port->stats_work, + round_jiffies_relative(msecs_to_jiffies(1000))); } static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) @@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev) } mutex_unlock(&port->port_lock); - schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000)); + schedule_delayed_work(&port->stats_work, + round_jiffies_relative(msecs_to_jiffies(1000))); return ret; } diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c index 4326681df382..acc31af6594a 100644 --- a/drivers/net/ethernet/ibm/iseries_veth.c +++ b/drivers/net/ethernet/ibm/iseries_veth.c @@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx, /* FIXME: do we need this? 
*/ memset(local_list, 0, sizeof(local_list)); - memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG)); + memset(remote_list, 0, sizeof(remote_list)); /* a 0 address marks the end of the valid entries */ if (senddata->addr[startchunk] == 0) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7becff1f387d..76b84573566b 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme) } static int +jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg) +{ + u32 phy_addr; + + phy_addr = JM_PHY_SPEC_REG_READ | specreg; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, + phy_addr); + return jme_mdio_read(jme->dev, jme->mii_if.phy_id, + JM_PHY_SPEC_DATA_REG); +} + +static void +jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data) +{ + u32 phy_addr; + + phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG, + phy_data); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG, + phy_addr); +} + +static int +jme_phy_calibration(struct jme_adapter *jme) +{ + u32 ctrl1000, phy_data; + + jme_phy_off(jme); + jme_phy_on(jme); + /* Enable PHY test mode 1 */ + ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); + ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; + ctrl1000 |= PHY_GAD_TEST_MODE_1; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); + + phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); + phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0; + phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH | + JM_PHY_EXT_COMM_2_CALI_ENABLE; + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); + msleep(20); + phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG); + phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE | + JM_PHY_EXT_COMM_2_CALI_MODE_0 | + JM_PHY_EXT_COMM_2_CALI_LATCH); + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data); + + /* Disable PHY test mode */ + ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000); + ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000); + return 0; +} + +static int +jme_phy_setEA(struct jme_adapter *jme) +{ + u32 phy_comm0 = 0, phy_comm1 = 0; + u8 nic_ctrl; + + pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl); + if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE) + return 0; + + switch (jme->pdev->device) { + case PCI_DEVICE_ID_JMICRON_JMC250: + if (((jme->chip_main_rev == 5) && + ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || + (jme->chip_sub_rev == 3))) || + (jme->chip_main_rev >= 6)) { + phy_comm0 = 0x008A; + phy_comm1 = 0x4109; + } + if ((jme->chip_main_rev == 3) && + ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) + phy_comm0 = 0xE088; + break; + case PCI_DEVICE_ID_JMICRON_JMC260: + if (((jme->chip_main_rev == 5) && + ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) || + (jme->chip_sub_rev == 3))) || + (jme->chip_main_rev >= 6)) { + phy_comm0 = 0x008A; + phy_comm1 = 0x4109; + } + if ((jme->chip_main_rev == 3) && + ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2))) + phy_comm0 = 0xE088; + if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0)) + phy_comm0 = 0x608A; + if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2)) + phy_comm0 = 0x408A; + break; + default: + return -ENODEV; + } + if (phy_comm0) + jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0); + if (phy_comm1) + jme_phy_specreg_write(jme, 
JM_PHY_EXT_COMM_1_REG, phy_comm1); + + return 0; +} + +static int jme_open(struct net_device *netdev) { struct jme_adapter *jme = netdev_priv(netdev); @@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev) jme_set_settings(netdev, &jme->old_ecmd); else jme_reset_phy_processor(jme); - + jme_phy_calibration(jme); + jme_phy_setEA(jme); jme_reset_link(jme); return 0; @@ -3184,7 +3291,8 @@ jme_resume(struct device *dev) jme_set_settings(netdev, &jme->old_ecmd); else jme_reset_phy_processor(jme); - + jme_phy_calibration(jme); + jme_phy_setEA(jme); jme_start_irq(jme); netif_device_attach(netdev); @@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, jme_pci_tbl); - diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 02ea27c1dcb5..4304072bd3c5 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -760,6 +760,25 @@ enum jme_rxmcs_bits { RXMCS_CHECKSUM, }; +/* Extern PHY common register 2 */ + +#define PHY_GAD_TEST_MODE_1 0x00002000 +#define PHY_GAD_TEST_MODE_MSK 0x0000E000 +#define JM_PHY_SPEC_REG_READ 0x00004000 +#define JM_PHY_SPEC_REG_WRITE 0x00008000 +#define PHY_CALIBRATION_DELAY 20 +#define JM_PHY_SPEC_ADDR_REG 0x1E +#define JM_PHY_SPEC_DATA_REG 0x1F + +#define JM_PHY_EXT_COMM_0_REG 0x30 +#define JM_PHY_EXT_COMM_1_REG 0x31 +#define JM_PHY_EXT_COMM_2_REG 0x32 +#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01 +#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02 +#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10 +#define PCI_PRIV_SHARE_NICCTRL 0xF5 +#define JME_FLAG_PHYEA_ENABLE 0x2 + /* * Wakeup Frame setup interface registers */ diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6bb2b9506cad..0b3567ab8121 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -34,6 +34,8 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> #include <asm/checksum.h> diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fdc6c394c683..7803efa46eb2 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -50,7 +50,7 @@ #include "sky2.h" #define DRV_NAME "sky2" -#define DRV_VERSION "1.29" +#define DRV_VERSION "1.30" /* * The Yukon II chipset takes 64 bit command blocks (called list elements) @@ -68,7 +68,7 @@ #define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) #define TX_MIN_PENDING (MAX_SKB_TX_LE+1) #define TX_MAX_PENDING 1024 -#define TX_DEF_PENDING 127 +#define TX_DEF_PENDING 63 #define TX_WATCHDOG (5 * HZ) #define NAPI_WEIGHT 64 @@ -869,6 +869,7 @@ static void sky2_wol_init(struct sky2_port *sky2) /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + sky2_read32(hw, B0_CTST); } static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port) @@ -1274,6 +1275,14 @@ static void rx_set_checksum(struct sky2_port *sky2) ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } +/* + * Fixed initial key as seed to RSS. 
+ */ +static const uint32_t rss_init_key[10] = { + 0x7c3351da, 0x51c5cf4e, 0x44adbdd1, 0xe8d38d18, 0x48897c43, + 0xb1d60e7e, 0x6a3dd760, 0x01a2e453, 0x16f46f13, 0x1a0e7b30 +}; + /* Enable/disable receive hash calculation (RSS) */ static void rx_set_rss(struct net_device *dev, u32 features) { @@ -1289,12 +1298,9 @@ static void rx_set_rss(struct net_device *dev, u32 features) /* Program RSS initial values */ if (features & NETIF_F_RXHASH) { - u32 key[nkeys]; - - get_random_bytes(key, nkeys * sizeof(u32)); for (i = 0; i < nkeys; i++) sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), - key[i]); + rss_init_key[i]); /* Need to turn on (undocumented) flag to make hashing work */ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), @@ -1717,6 +1723,8 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name) if (err) dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); else { + hw->flags |= SKY2_HW_IRQ_SETUP; + napi_enable(&hw->napi); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); sky2_read32(hw, B0_IMSK); @@ -1727,7 +1735,7 @@ static int sky2_setup_irq(struct sky2_hw *hw, const char *name) /* Bring up network interface. */ -static int sky2_up(struct net_device *dev) +static int sky2_open(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -1747,6 +1755,11 @@ static int sky2_up(struct net_device *dev) sky2_hw_up(sky2); + if (hw->chip_id == CHIP_ID_YUKON_OPT || + hw->chip_id == CHIP_ID_YUKON_PRM || + hw->chip_id == CHIP_ID_YUKON_OP_2) + imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */ + /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); imask |= portirq_msk[port]; @@ -2040,6 +2053,8 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port) sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + + sky2_read32(hw, B0_CTST); } static void sky2_hw_down(struct sky2_port *sky2) @@ -2090,7 +2105,7 @@ static void sky2_hw_down(struct sky2_port *sky2) } /* Network shutdown */ -static int sky2_down(struct net_device *dev) +static int sky2_close(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -2101,15 +2116,22 @@ static int sky2_down(struct net_device *dev) netif_info(sky2, ifdown, dev, "disabling interface\n"); - /* Disable port IRQ */ - sky2_write32(hw, B0_IMSK, - sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]); - sky2_read32(hw, B0_IMSK); - if (hw->ports == 1) { + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); + napi_disable(&hw->napi); free_irq(hw->pdev->irq, hw); + hw->flags &= ~SKY2_HW_IRQ_SETUP; } else { + u32 imask; + + /* Disable port IRQ */ + imask = sky2_read32(hw, B0_IMSK); + imask &= ~portirq_msk[sky2->port]; + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + synchronize_irq(hw->pdev->irq); napi_synchronize(&hw->napi); } @@ -2587,7 +2609,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) if (netif_running(dev)) { sky2_tx_complete(sky2, last); - /* Wake unless it's detached, and called e.g. from sky2_down() */ + /* Wake unless it's detached, and called e.g. 
from sky2_close() */ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) netif_wake_queue(dev); } @@ -3258,7 +3280,6 @@ static void sky2_reset(struct sky2_hw *hw) hw->chip_id == CHIP_ID_YUKON_PRM || hw->chip_id == CHIP_ID_YUKON_OP_2) { u16 reg; - u32 msk; if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */ @@ -3281,11 +3302,6 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_pci_write16(hw, PSM_CONFIG_REG4, reg); - /* enable PHY Quick Link */ - msk = sky2_read32(hw, B0_IMSK); - msk |= Y2_IS_PHY_QLNK; - sky2_write32(hw, B0_IMSK, msk); - /* check if PSMv2 was running before */ reg = sky2_pci_read16(hw, PSM_CONFIG_REG3); if (reg & PCI_EXP_LNKCTL_ASPMC) @@ -3383,7 +3399,7 @@ static void sky2_detach(struct net_device *dev) netif_tx_lock(dev); netif_device_detach(dev); /* stop txq */ netif_tx_unlock(dev); - sky2_down(dev); + sky2_close(dev); } } @@ -3393,7 +3409,7 @@ static int sky2_reattach(struct net_device *dev) int err = 0; if (netif_running(dev)) { - err = sky2_up(dev); + err = sky2_open(dev); if (err) { netdev_info(dev, "could not restart %d\n", err); dev_close(dev); @@ -3410,10 +3426,13 @@ static void sky2_all_down(struct sky2_hw *hw) { int i; - sky2_read32(hw, B0_IMSK); - sky2_write32(hw, B0_IMSK, 0); - synchronize_irq(hw->pdev->irq); - napi_disable(&hw->napi); + if (hw->flags & SKY2_HW_IRQ_SETUP) { + sky2_read32(hw, B0_IMSK); + sky2_write32(hw, B0_IMSK, 0); + + synchronize_irq(hw->pdev->irq); + napi_disable(&hw->napi); + } for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; @@ -3446,11 +3465,12 @@ static void sky2_all_up(struct sky2_hw *hw) netif_wake_queue(dev); } - sky2_write32(hw, B0_IMSK, imask); - sky2_read32(hw, B0_IMSK); - - sky2_read32(hw, B0_Y2_SP_LISR); - napi_enable(&hw->napi); + if (hw->flags & SKY2_HW_IRQ_SETUP) { + sky2_write32(hw, B0_IMSK, imask); + sky2_read32(hw, B0_IMSK); + sky2_read32(hw, B0_Y2_SP_LISR); + napi_enable(&hw->napi); + } } static void sky2_restart(struct work_struct *work) @@ -4071,6 +4091,16 @@ static int sky2_set_coalesce(struct net_device *dev, return 0; } +/* + * Hardware is limited to min of 128 and max of 2048 for ring size + * and rounded up to next power of two + * to avoid division in modulus calculation + */ +static unsigned long roundup_ring_size(unsigned long pending) +{ + return max(128ul, roundup_pow_of_two(pending+1)); +} + static void sky2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { @@ -4098,7 +4128,7 @@ static int sky2_set_ringparam(struct net_device *dev, sky2->rx_pending = ering->rx_pending; sky2->tx_pending = ering->tx_pending; - sky2->tx_ring_size = roundup_pow_of_two(sky2->tx_pending+1); + sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending); return sky2_reattach(dev); } @@ -4556,7 +4586,7 @@ static int sky2_device_event(struct notifier_block *unused, struct net_device *dev = ptr; struct sky2_port *sky2 = netdev_priv(dev); - if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug) + if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) return NOTIFY_DONE; switch (event) { @@ -4621,8 +4651,8 @@ static __exit void sky2_debug_cleanup(void) not allowing netpoll on second port */ static const struct net_device_ops sky2_netdev_ops[2] = { { - .ndo_open = sky2_up, - .ndo_stop = sky2_down, + .ndo_open = sky2_open, + .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, .ndo_do_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -4638,8 +4668,8 @@ static const struct 
net_device_ops sky2_netdev_ops[2] = { #endif }, { - .ndo_open = sky2_up, - .ndo_stop = sky2_down, + .ndo_open = sky2_open, + .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, .ndo_do_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -4692,7 +4722,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, spin_lock_init(&sky2->phy_lock); sky2->tx_pending = TX_DEF_PENDING; - sky2->tx_ring_size = roundup_pow_of_two(TX_DEF_PENDING+1); + sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING); sky2->rx_pending = RX_DEF_PENDING; hw->dev[port] = dev; diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 0af31b8b5f10..ff6f58bf822a 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2287,6 +2287,7 @@ struct sky2_hw { #define SKY2_HW_RSS_BROKEN 0x00000100 #define SKY2_HW_VLAN_BROKEN 0x00000200 #define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */ +#define SKY2_HW_IRQ_SETUP 0x00000800 u8 chip_id; u8 chip_rev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index b89c36dbf5b3..c2df6c358603 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -581,6 +581,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * Packet is OK - process it. */ length = be32_to_cpu(cqe->byte_cnt); + length -= ring->fcs_del; ring->bytes += length; ring->packets++; @@ -813,8 +814,11 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); /* Cancel FCS removal if FW allows */ - if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) + if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) { context->param3 |= cpu_to_be32(1 << 29); + ring->fcs_del = ETH_FCS_LEN; + } else + ring->fcs_del = 0; err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 8fda331c65df..207b5add3ca8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -272,6 +272,7 @@ struct mlx4_en_rx_ring { u32 prod; u32 cons; u32 buf_size; + u8 fcs_del; void *buf; void *rx_info; unsigned long bytes; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 1dca57013cb2..1c61d36e6570 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -609,7 +609,7 @@ struct nv_ethtool_str { }; static const struct nv_ethtool_str nv_estats_str[] = { - { "tx_bytes" }, + { "tx_bytes" }, /* includes Ethernet FCS CRC */ { "tx_zero_rexmt" }, { "tx_one_rexmt" }, { "tx_many_rexmt" }, @@ -637,7 +637,7 @@ static const struct nv_ethtool_str nv_estats_str[] = { /* version 2 stats */ { "tx_deferral" }, { "tx_packets" }, - { "rx_bytes" }, + { "rx_bytes" }, /* includes Ethernet FCS CRC */ { "tx_pause" }, { "rx_pause" }, { "rx_drop_frame" }, @@ -649,7 +649,7 @@ static const struct nv_ethtool_str nv_estats_str[] = { }; struct nv_ethtool_stats { - u64 tx_bytes; + u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */ u64 tx_zero_rexmt; u64 tx_one_rexmt; u64 tx_many_rexmt; @@ -670,14 +670,14 @@ struct nv_ethtool_stats { u64 rx_unicast; u64 rx_multicast; u64 rx_broadcast; - u64 rx_packets; + u64 rx_packets; /* should be ifconfig->rx_packets */ u64 rx_errors_total; u64 
tx_errors_total; /* version 2 stats */ u64 tx_deferral; - u64 tx_packets; - u64 rx_bytes; + u64 tx_packets; /* should be ifconfig->tx_packets */ + u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */ u64 tx_pause; u64 rx_pause; u64 rx_drop_frame; @@ -1706,10 +1706,17 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { nv_get_hw_stats(dev); + /* + * Note: because HW stats are not always available and + * for consistency reasons, the following ifconfig + * stats are managed by software: rx_bytes, tx_bytes, + * rx_packets and tx_packets. The related hardware + * stats reported by ethtool should be equivalent to + * these ifconfig stats, with 4 additional bytes per + * packet (Ethernet FCS CRC). + */ + /* copy to net_device stats */ - dev->stats.tx_packets = np->estats.tx_packets; - dev->stats.rx_bytes = np->estats.rx_bytes; - dev->stats.tx_bytes = np->estats.tx_bytes; dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; dev->stats.rx_crc_errors = np->estats.rx_crc_errors; @@ -2380,6 +2387,9 @@ static int nv_tx_done(struct net_device *dev, int limit) if (flags & NV_TX_ERROR) { if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; @@ -2390,6 +2400,9 @@ static int nv_tx_done(struct net_device *dev, int limit) if (flags & NV_TX2_ERROR) { if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) nv_legacybackoff_reseed(dev); + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; @@ -2429,6 +2442,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) else nv_legacybackoff_reseed(dev); } + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); @@ -2678,6 +2694,7 @@ static int nv_rx_process(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); napi_gro_receive(&np->napi, skb); dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) np->get_rx.orig = np->first_rx.orig; @@ -2761,6 +2778,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } napi_gro_receive(&np->napi, skb); dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } else { dev_kfree_skb(skb); } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 9c075ea2682e..9cb5f912e489 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -18,8 +18,8 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ -#include <linux/module.h> /* for __MODULE_STRING */ #include "pch_gbe.h" +#include <linux/module.h> /* for __MODULE_STRING */ #define OPTION_UNSET -1 #define OPTION_DISABLED 0 diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile index 05db5434bafc..90497ffb1ac3 100644 --- a/drivers/net/ethernet/pasemi/Makefile +++ b/drivers/net/ethernet/pasemi/Makefile @@ -2,4 +2,5 @@ # Makefile for the A Semi network device drivers. 
# -obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o +obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o +pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index 8731f79c9efc..b8478aab050e 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h @@ -58,10 +58,8 @@ #define TX_DESC_PER_IOCB 8 -/* The maximum number of frags we handle is based - * on PAGE_SIZE... - */ -#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */ + +#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) #else /* all other page sizes */ #define TX_DESC_PER_OAL 0 @@ -1353,7 +1351,7 @@ struct tx_ring_desc { struct ob_mac_iocb_req *queue_entry; u32 index; struct oal oal; - struct map_list map[MAX_SKB_FRAGS + 1]; + struct map_list map[MAX_SKB_FRAGS + 2]; int map_cnt; struct tx_ring_desc *next; }; diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 1fc01ca72b46..4bf68cfef390 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -940,7 +940,7 @@ static void r6040_multicast_list(struct net_device *dev) iowrite16(lp->mcr0, ioaddr + MCR0); /* Fill the MAC hash tables with their values */ - if (lp->mcr0 && MCR0_HASH_EN) { + if (lp->mcr0 & MCR0_HASH_EN) { iowrite16(hash_table[0], ioaddr + MAR0); iowrite16(hash_table[1], ioaddr + MAR1); iowrite16(hash_table[2], ioaddr + MAR2); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 92b45f08858f..67bf07819992 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) return value; } -static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { - RTL_W16(IntrMask, 0x0000); + void __iomem *ioaddr = tp->mmio_addr; - RTL_W16(IntrStatus, 0xffff); + RTL_W16(IntrMask, 0x0000); + RTL_W16(IntrStatus, tp->intr_event); + RTL_R8(ChipCmd); } static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) @@ -1292,7 +1294,7 @@ static void __rtl8169_check_link_status(struct net_device *dev, netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); if (pm) - pm_schedule_suspend(&tp->pci_dev->dev, 100); + pm_schedule_suspend(&tp->pci_dev->dev, 5000); } spin_unlock_irqrestore(&tp->lock, flags); } @@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) break; udelay(100); } - - rtl8169_init_ring_indexes(tp); } static int __devinit @@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; /* Disable interrupts */ - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); rtl_rx_close(tp); @@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_22) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) { + tp->intr_event &= ~RxFIFOOver; + tp->napi_event &= ~RxFIFOOver; + } + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) { int cap = pci_pcie_cap(pdev); @@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); @@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work) if (!netif_running(dev)) goto out_unlock; + rtl8169_hw_reset(tp); + rtl8169_wait_for_quiescence(dev); for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); - rtl8169_hw_reset(tp); rtl_hw_start(dev); netif_wake_queue(dev); rtl8169_check_link_status(dev, tp, tp->mmio_addr); @@ -5407,11 +5413,6 @@ out_unlock: static void rtl8169_tx_timeout(struct net_device *dev) { - struct rtl8169_private *tp = netdev_priv(dev); - - rtl8169_hw_reset(tp); - - /* Let's wait a bit while any (async) irq lands on */ rtl8169_schedule_work(dev, rtl8169_reset_task); } @@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) */ status = RTL_R16(IntrStatus); while (status && status != 0xffff) { + status &= tp->intr_event; + if (!status) + break; + handled = 1; /* Handle all of the error cases first. These will reset @@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) switch (tp->mac_version) { /* Work around for rx fifo overflow */ case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_26: netif_stop_queue(dev); rtl8169_tx_timeout(dev); goto done; - /* Testers needed. */ - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - /* Experimental science. Pktgen proof. */ - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_25: - if (status == RxFIFOOver) - goto done; - break; default: break; } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index d2be42aafbef..8843071fe987 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1937,6 +1937,7 @@ static int __devinit smsc911x_init(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int byte_test; + unsigned int to = 100; SMSC_TRACE(pdata, probe, "Driver Parameters:"); SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", @@ -1952,6 +1953,17 @@ static int __devinit smsc911x_init(struct net_device *dev) return -ENODEV; } + /* + * poll the READY bit in PMT_CTRL. Any other access to the device is + * forbidden while this bit isn't set. 
Try for 100ms + */ + while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) + udelay(1000); + if (to == 0) { + pr_err("Device not READY in 100ms aborting\n"); + return -ENODEV; + } + /* Check byte ordering */ byte_test = smsc911x_reg_read(pdata, BYTE_TEST); SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index da66ac511c4c..4d5402a1d262 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -39,10 +39,11 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, /* DMA SW reset */ value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); - limit = 15000; + limit = 10; while (limit--) { if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) break; + mdelay(10); } if (limit < 0) return -EBUSY; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 627f656b0f3c..bc17fd08b55d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -41,10 +41,11 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, /* DMA SW reset */ value |= DMA_BUS_MODE_SFT_RESET; writel(value, ioaddr + DMA_BUS_MODE); - limit = 15000; + limit = 10; while (limit--) { if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) break; + mdelay(10); } if (limit < 0) return -EBUSY; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 9bafa6cf9e8b..a140a8fbf051 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -72,7 +72,6 @@ struct stmmac_priv { spinlock_t lock; spinlock_t tx_lock; int wolopts; - int wolenabled; int wol_irq; #ifdef CONFIG_STMMAC_TIMER struct stmmac_timer *tm; @@ -80,6 +79,7 @@ struct stmmac_priv { struct plat_stmmacenet_data *plat; struct stmmac_counters mmc; struct dma_features dma_cap; + int hw_cap_support; }; extern int stmmac_mdio_unregister(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index e8eff09bbbd7..0395f9eba801 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -430,6 +430,12 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct stmmac_priv *priv = netdev_priv(dev); u32 support = WAKE_MAGIC | WAKE_UCAST; + /* By default almost all GMAC devices support the WoL via + * magic frame but we can disable it if the HW capability + * register shows no support for pmt_magic_frame. 
*/ + if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame)) + wol->wolopts &= ~WAKE_MAGIC; + if (!device_can_wakeup(priv->device)) return -EINVAL; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 20546bbbb8db..72cd190b9c1a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -321,12 +321,10 @@ static int stmmac_init_phy(struct net_device *dev) } /* Stop Advertising 1000BASE Capability if interface is not GMII */ - if ((interface) && ((interface == PHY_INTERFACE_MODE_MII) || - (interface == PHY_INTERFACE_MODE_RMII))) { - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; - } + if ((interface == PHY_INTERFACE_MODE_MII) || + (interface == PHY_INTERFACE_MODE_RMII)) + phydev->advertising &= ~(SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); /* * Broken HW is sometimes missing the pull-up resistor on the @@ -783,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - /* Do not manage MMC IRQ (FIXME) */ + /* Mask MMC irq, counters are managed in SW and registers + * are cleared on each READ eventually. */ dwmac_mmc_intr_all_mask(priv->ioaddr); - dwmac_mmc_ctrl(priv->ioaddr, mode); - memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + + if (priv->dma_cap.rmon) { + dwmac_mmc_ctrl(priv->ioaddr, mode); + memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); + } else + pr_info(" No MAC Management Counters available"); } static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) @@ -807,8 +810,29 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) return 0; } -/* New GMAC chips support a new register to indicate the - * presence of the optional feature/functions. +/** + * stmmac_selec_desc_mode + * @dev : device pointer + * Description: select the Enhanced/Alternate or Normal descriptors */ +static void stmmac_selec_desc_mode(struct stmmac_priv *priv) +{ + if (priv->plat->enh_desc) { + pr_info(" Enhanced/Alternate descriptors\n"); + priv->hw->desc = &enh_desc_ops; + } else { + pr_info(" Normal descriptors\n"); + priv->hw->desc = &ndesc_ops; + } +} + +/** + * stmmac_get_hw_features + * @priv : private device pointer + * Description: + * new GMAC chip generations have a new register to indicate the + * presence of the optional feature/functions. + * This can be also used to override the value passed through the + * platform and necessary for old MAC10/100 and GMAC chips. 
*/ static int stmmac_get_hw_features(struct stmmac_priv *priv) { @@ -829,7 +853,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; priv->dma_cap.pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; - /*MMC*/ + /* MMC */ priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; /* IEEE 1588-2002*/ priv->dma_cap.time_stamp = @@ -857,8 +881,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; - } else - pr_debug("\tNo HW DMA feature register supported"); + } return hw_cap; } @@ -913,6 +936,44 @@ static int stmmac_open(struct net_device *dev) goto open_error; } + stmmac_get_synopsys_id(priv); + + priv->hw_cap_support = stmmac_get_hw_features(priv); + + if (priv->hw_cap_support) { + pr_info(" Support DMA HW capability register"); + + /* We can override some gmac/dma configuration fields: e.g. + * enh_desc, tx_coe (e.g. that are passed through the + * platform) with the values from the HW capability + * register (if supported). + */ + priv->plat->enh_desc = priv->dma_cap.enh_desc; + priv->plat->tx_coe = priv->dma_cap.tx_coe; + priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; + + /* By default disable wol on magic frame if not supported */ + if (!priv->dma_cap.pmt_magic_frame) + priv->wolopts &= ~WAKE_MAGIC; + + } else + pr_info(" No HW DMA feature register supported"); + + /* Select the enhanced/normal descriptor structures */ + stmmac_selec_desc_mode(priv); + + /* PMT module is not integrated in all the MAC devices. */ + if (priv->plat->pmt) { + pr_info(" Remote wake-up capable\n"); + device_set_wakeup_capable(priv->device, 1); + } + + priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); + if (priv->rx_coe) + pr_info(" Checksum Offload Engine supported\n"); + if (priv->plat->tx_coe) + pr_info(" Checksum insertion supported\n"); + /* Create and initialize the TX/RX descriptors chains. */ priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); @@ -935,15 +996,6 @@ static int stmmac_open(struct net_device *dev) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->ioaddr); - stmmac_get_synopsys_id(priv); - - stmmac_get_hw_features(priv); - - priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); - if (priv->rx_coe) - pr_info("stmmac: Rx Checksum Offload Engine supported\n"); - if (priv->plat->tx_coe) - pr_info("\tTX Checksum insertion supported\n"); netdev_update_features(dev); /* Request the IRQ lines */ @@ -965,8 +1017,7 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - if (priv->dma_cap.rmon) - stmmac_mmc_setup(priv); + stmmac_mmc_setup(priv); /* Start the ball rolling... 
*/ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); @@ -1489,9 +1540,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!priv->phydev) return -EINVAL; - spin_lock(&priv->lock); ret = phy_mii_ioctl(priv->phydev, rq, cmd); - spin_unlock(&priv->lock); return ret; } @@ -1558,7 +1607,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); - if (!stmmac_get_hw_features(priv)) { + if (!priv->hw_cap_support) { seq_printf(seq, "DMA HW features not supported\n"); return 0; } @@ -1766,12 +1815,6 @@ static int stmmac_mac_device_setup(struct net_device *dev) if (!device) return -ENOMEM; - if (priv->plat->enh_desc) { - device->desc = &enh_desc_ops; - pr_info("\tEnhanced descriptor structure\n"); - } else - device->desc = &ndesc_ops; - priv->hw = device; priv->hw->ring = &ring_mode_ops; @@ -1845,11 +1888,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev) priv->ioaddr = addr; - /* PMT module is not integrated in all the MAC devices. */ - if (plat_dat->pmt) { - pr_info("\tPMT module supported\n"); - device_set_wakeup_capable(&pdev->dev, 1); - } /* * On some platforms e.g. SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code @@ -1862,7 +1900,6 @@ static int stmmac_dvr_probe(struct platform_device *pdev) if (priv->wol_irq == -ENXIO) priv->wol_irq = ndev->irq; - platform_set_drvdata(pdev, ndev); /* Set the I/O base addr */ @@ -1875,7 +1912,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev) goto out_free_ndev; } - /* MAC HW revice detection */ + /* MAC HW device detection */ ret = stmmac_mac_device_setup(ndev); if (ret < 0) goto out_plat_exit; @@ -1978,12 +2015,13 @@ static int stmmac_suspend(struct device *dev) if (!ndev || !netif_running(ndev)) return 0; + if (priv->phydev) + phy_stop(priv->phydev); + spin_lock(&priv->lock); netif_device_detach(ndev); netif_stop_queue(ndev); - if (priv->phydev) - phy_stop(priv->phydev); #ifdef CONFIG_STMMAC_TIMER priv->tm->timer_stop(); @@ -2041,12 +2079,13 @@ static int stmmac_resume(struct device *dev) #endif napi_enable(&priv->napi); - if (priv->phydev) - phy_start(priv->phydev); - netif_start_queue(ndev); spin_unlock(&priv->lock); + + if (priv->phydev) + phy_start(priv->phydev); + return 0; } diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index c517dac02ae1..cf14ab9db576 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2637,7 +2637,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i sbus_dp = op->dev.parent->of_node; /* We can match PCI devices too, do not accept those here. */ - if (strcmp(sbus_dp->name, "sbus")) + if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi")) return err; if (is_qfe) { diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 10826d8a2a2d..1187a1169eb2 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) goto done; /* Re-enable the ingress interrupt. */ - enable_percpu_irq(priv->intr_id); + enable_percpu_irq(priv->intr_id, 0); /* HACK: Avoid the "rotting packet" problem (see above). 
*/ if (qup->__packet_receive_read != @@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr) info->napi_enabled = true; /* Enable the ingress interrupt. */ - enable_percpu_irq(priv->intr_id); + enable_percpu_irq(priv->intr_id, 0); } @@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags, for (i = 0; i < sh->nr_frags; i++) { skb_frag_t *f = &sh->frags[i]; - unsigned long pfn = page_to_pfn(f->page); + unsigned long pfn = page_to_pfn(skb_frag_page(f)); /* FIXME: Compute "hash_for_home" properly. */ /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ @@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags, /* FIXME: Hmmm. */ if (!hash_default) { void *va = pfn_to_kaddr(pfn) + f->page_offset; - BUG_ON(PageHighMem(f->page)); + BUG_ON(PageHighMem(skb_frag_page(f))); finv_buffer_remote(va, f->size, 0); } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index caf3659e173c..2681b53820ee 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -114,6 +114,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) return; temac_iow(lp, XTE_LSW0_OFFSET, value); temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); + temac_indirect_busywait(lp); } /** @@ -203,6 +204,9 @@ static void temac_dma_bd_release(struct net_device *ndev) struct temac_local *lp = netdev_priv(ndev); int i; + /* Reset Local Link (DMA) */ + lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); + for (i = 0; i < RX_BD_NUM; i++) { if (!lp->rx_skb[i]) break; @@ -860,6 +864,8 @@ static int temac_open(struct net_device *ndev) phy_start(lp->phy_dev); } + temac_device_reset(ndev); + rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev); if (rc) goto err_tx_irq; @@ -867,7 +873,6 @@ static int temac_open(struct net_device *ndev) if (rc) goto err_rx_irq; - temac_device_reset(ndev); return 0; err_rx_irq: diff --git a/drivers/net/hippi/Kconfig b/drivers/net/hippi/Kconfig index 7393eb732ee6..95eb34fdbba7 100644 --- a/drivers/net/hippi/Kconfig +++ b/drivers/net/hippi/Kconfig @@ -36,4 +36,4 @@ config ROADRUNNER_LARGE_RINGS kernel code or by user space programs. Say Y here only if you have the memory. 
-endif /* HIPPI */ +endif # HIPPI diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bb88e12101c7..a70244306c94 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -3,7 +3,7 @@ # menuconfig PHYLIB - bool "PHY Device support and infrastructure" + tristate "PHY Device support and infrastructure" depends on !S390 depends on NETDEVICES help diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index e81e22e3d1d2..e6fed4d4cb77 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -36,7 +36,7 @@ #include <linux/usb/usbnet.h> #include <linux/slab.h> -#define DRIVER_VERSION "26-Sep-2011" +#define DRIVER_VERSION "08-Nov-2011" #define DRIVER_NAME "asix" /* ASIX AX8817X based USB 2.0 Ethernet Devices */ @@ -163,7 +163,7 @@ #define MARVELL_CTRL_TXDELAY 0x0002 #define MARVELL_CTRL_RXDELAY 0x0080 -#define PHY_MODE_RTL8211CL 0x0004 +#define PHY_MODE_RTL8211CL 0x000C /* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */ struct asix_data { @@ -652,9 +652,17 @@ static u32 asix_get_phyid(struct usbnet *dev) { int phy_reg; u32 phy_id; + int i; - phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1); - if (phy_reg < 0) + /* Poll for the rare case the FW or phy isn't ready yet. */ + for (i = 0; i < 100; i++) { + phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1); + if (phy_reg != 0 && phy_reg != 0xFFFF) + break; + mdelay(1); + } + + if (phy_reg <= 0 || phy_reg == 0xFFFF) return 0; phy_id = (phy_reg & 0xffff) << 16; @@ -1075,7 +1083,7 @@ static const struct net_device_ops ax88772_netdev_ops = { static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) { - int ret; + int ret, embd_phy; struct asix_data *data = (struct asix_data *)&dev->data; u8 buf[ETH_ALEN]; u32 phyid; @@ -1100,16 +1108,36 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->mii.reg_num_mask = 0x1f; dev->mii.phy_id = asix_get_phy_addr(dev); - phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); - dev->net->netdev_ops = &ax88772_netdev_ops; dev->net->ethtool_ops = &ax88772_ethtool_ops; - ret = ax88772_reset(dev); + embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); + + /* Reset the PHY to normal operation mode */ + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); + if (ret < 0) { + dbg("Select PHY #1 failed: %d", ret); + return ret; + } + + ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL); + if (ret < 0) + return ret; + + msleep(150); + + ret = asix_sw_reset(dev, AX_SWRESET_CLEAR); if (ret < 0) return ret; + msleep(150); + + ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE); + + /* Read PHYID register *AFTER* the PHY was reset properly */ + phyid = asix_get_phyid(dev); + dbg("PHYID=0x%08x", phyid); + /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { /* hard_mtu is still the default - the device does not support @@ -1220,6 +1248,7 @@ static int ax88178_reset(struct usbnet *dev) __le16 eeprom; u8 status; int gpio0 = 0; + u32 phyid; asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); dbg("GPIO Status: 0x%04x", status); @@ -1235,12 +1264,13 @@ static int ax88178_reset(struct usbnet *dev) data->ledmode = 0; gpio0 = 1; } else { - data->phymode = le16_to_cpu(eeprom) & 7; + data->phymode = le16_to_cpu(eeprom) & 0x7F; data->ledmode = le16_to_cpu(eeprom) >> 8; gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 
0 : 1; } dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); + /* Power up external GigaPHY through AX88178 GPIO pin */ asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); if ((le16_to_cpu(eeprom) >> 8) != 1) { asix_write_gpio(dev, 0x003c, 30); @@ -1252,6 +1282,13 @@ static int ax88178_reset(struct usbnet *dev) asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); } + /* Read PHYID register *AFTER* powering up PHY */ + phyid = asix_get_phyid(dev); + dbg("PHYID=0x%08x", phyid); + + /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */ + asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL); + asix_sw_reset(dev, 0); msleep(150); @@ -1396,7 +1433,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) { int ret; u8 buf[ETH_ALEN]; - u32 phyid; struct asix_data *data = (struct asix_data *)&dev->data; data->eeprom_len = AX88772_EEPROM_LEN; @@ -1423,12 +1459,12 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->netdev_ops = &ax88178_netdev_ops; dev->net->ethtool_ops = &ax88178_ethtool_ops; - phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + /* Blink LEDS so users know driver saw dongle */ + asix_sw_reset(dev, 0); + msleep(150); - ret = ax88178_reset(dev); - if (ret < 0) - return ret; + asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD); + msleep(150); /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c924ea2bce07..99ed6eb4dfaf 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -567,7 +567,7 @@ static const struct usb_device_id products [] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = (unsigned long)&wwan_info, + .driver_info = 0, }, /* diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c index d43db32f9478..9c26c6390d69 100644 --- a/drivers/net/usb/lg-vl600.c +++ b/drivers/net/usb/lg-vl600.c @@ -144,10 +144,11 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } frame = (struct vl600_frame_hdr *) buf->data; - /* NOTE: Should check that frame->magic == 0x53544448? - * Otherwise if we receive garbage at the beginning of the frame - * we may end up allocating a huge buffer and saving all the - * future incoming data into it. */ + /* Yes, check that frame->magic == 0x53544448 (or 0x44544d48), + * otherwise we may run out of memory w/a bad packet */ + if (ntohl(frame->magic) != 0x53544448 && + ntohl(frame->magic) != 0x44544d48) + goto error; if (buf->len < sizeof(*frame) || buf->len != le32_to_cpup(&frame->len)) { @@ -296,6 +297,11 @@ encapsulate: * overwrite the remaining fields. 
*/ packet = (struct vl600_pkt_hdr *) skb->data; + /* The VL600 wants IPv6 packets to have an IPv4 ethertype + * Since this modem only supports IPv4 and IPv6, just set all + * frames to 0x0800 (ETH_P_IP) + */ + packet->h_proto = htons(ETH_P_IP); memset(&packet->dummy, 0, sizeof(packet->dummy)); packet->len = cpu_to_le32(orig_len); @@ -308,21 +314,12 @@ encapsulate: if (skb->len < full_len) /* Pad */ skb_put(skb, full_len - skb->len); - /* The VL600 wants IPv6 packets to have an IPv4 ethertype - * Check if this is an IPv6 packet, and set the ethertype - * to 0x800 - */ - if ((skb->data[sizeof(struct vl600_pkt_hdr *) + 0x22] & 0xf0) == 0x60) { - skb->data[sizeof(struct vl600_pkt_hdr *) + 0x20] = 0x08; - skb->data[sizeof(struct vl600_pkt_hdr *) + 0x21] = 0; - } - return skb; } static const struct driver_info vl600_info = { .description = "LG VL600 modem", - .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE, + .flags = FLAG_RX_ASSEMBLE | FLAG_WWAN, .bind = vl600_bind, .unbind = vl600_unbind, .status = usbnet_cdc_status, diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 22a7cf951e72..a5b9b12ef268 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -51,6 +51,7 @@ #define USB_VENDOR_ID_SMSC (0x0424) #define USB_PRODUCT_ID_LAN7500 (0x7500) #define USB_PRODUCT_ID_LAN7505 (0x7505) +#define RXW_PADDING 2 #define check_warn(ret, fmt, args...) \ ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) @@ -1088,13 +1089,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); le32_to_cpus(&rx_cmd_b); - skb_pull(skb, 4 + NET_IP_ALIGN); + skb_pull(skb, 4 + RXW_PADDING); packet = skb->data; /* get the packet length */ - size = (rx_cmd_a & RX_CMD_A_LEN) - NET_IP_ALIGN; - align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; + size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; + align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2f91acccb7db..8873c6e6fb96 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1827,7 +1827,8 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) } /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. 
*/ - REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); + if (AR_SREV_9300_20_OR_LATER(ah)) + REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE); } /* diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 85fa9cc73502..65ecb5bab25a 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -254,6 +254,8 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy, int r; sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + if (!sband) + return; /* * If no country IE has been received always enable active scan diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index 58ea0e5fabfd..5f77cbe0b6aa 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -175,6 +175,7 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, } } +/* TODO: verify if needed for SSLPN or LCN */ static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) { const struct b43_phy *phy = &dev->phy; @@ -256,6 +257,9 @@ int b43_generate_txhdr(struct b43_wldev *dev, unsigned int plcp_fragment_len; u32 mac_ctl = 0; u16 phy_ctl = 0; + bool fill_phy_ctl1 = (phy->type == B43_PHYTYPE_LP || + phy->type == B43_PHYTYPE_N || + phy->type == B43_PHYTYPE_HT); u8 extra_ft = 0; struct ieee80211_rate *txrate; struct ieee80211_tx_rate *rates; @@ -531,7 +535,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, extra_ft |= B43_TXH_EFT_RTSFB_CCK; if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS && - phy->type == B43_PHYTYPE_N) { + fill_phy_ctl1) { txhdr->phy_ctl1_rts = cpu_to_le16( b43_generate_tx_phy_ctl1(dev, rts_rate)); txhdr->phy_ctl1_rts_fb = cpu_to_le16( @@ -552,7 +556,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, break; } - if (phy->type == B43_PHYTYPE_N) { + if (fill_phy_ctl1) { txhdr->phy_ctl1 = cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate)); txhdr->phy_ctl1_fb = @@ -736,7 +740,14 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) /* Link quality statistics */ switch (chanstat & B43_RX_CHAN_PHYTYPE) { + case B43_PHYTYPE_HT: + /* TODO: is max the right choice? 
*/ + status.signal = max_t(__s8, + max(rxhdr->phy_ht_power0, rxhdr->phy_ht_power1), + rxhdr->phy_ht_power2); + break; case B43_PHYTYPE_N: + /* Broadcom has code for min and avg, but always uses max */ if (rxhdr->power0 == 16 || rxhdr->power0 == 32) status.signal = max(rxhdr->power1, rxhdr->power2); else diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h index 16c514d54afa..98d90747836a 100644 --- a/drivers/net/wireless/b43/xmit.h +++ b/drivers/net/wireless/b43/xmit.h @@ -249,6 +249,12 @@ struct b43_rxhdr_fw4 { } __packed; } __packed; union { + /* HT-PHY */ + struct { + PAD_BYTES(1); + __s8 phy_ht_power0; + } __packed; + /* RSSI for N-PHYs */ struct { __s8 power2; @@ -257,7 +263,15 @@ struct b43_rxhdr_fw4 { __le16 phy_status2; /* PHY RX Status 2 */ } __packed; - __le16 phy_status3; /* PHY RX Status 3 */ + union { + /* HT-PHY */ + struct { + __s8 phy_ht_power1; + __s8 phy_ht_power2; + } __packed; + + __le16 phy_status3; /* PHY RX Status 3 */ + } __packed; union { /* Tested with 598.314, 644.1001 and 666.2 */ struct { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index b56a30297c26..6ebec8f42846 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -358,13 +358,14 @@ static uint nrxdactive(struct dma_info *di, uint h, uint t) static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) { - uint dmactrlflags = di->dma.dmactrlflags; + uint dmactrlflags; if (di == NULL) { - DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name)); + DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n")); return 0; } + dmactrlflags = di->dma.dmactrlflags; dmactrlflags &= ~mask; dmactrlflags |= flags; diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index e12b48c2cff6..dd008b0e6417 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = { .chain_noise_scale = 1000, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 128, + .wd_disable = true, }; static struct iwl_ht_params iwl1000_ht_params = { .ht_greenfield_support = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index c511c98a89a8..f55fb2d1af52 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = { .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .no_idle_support = true, + .wd_disable = true, }; static struct iwl_ht_params iwl5000_ht_params = { .ht_greenfield_support = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 58a381c01c89..a7a6def40d05 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) return 0; } +void iwlagn_config_ht40(struct ieee80211_conf *conf, + struct iwl_rxon_context *ctx) +{ + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + 
ctx->ht.is_40mhz = false; + } +} + int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) { struct iwl_priv *priv = hw->priv; @@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) ctx->ht.enabled = conf_is_ht(conf); if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } + /* if HT40 is used, it should not change + * after associated except channel switch */ + if (iwl_is_associated_ctx(ctx) && + !ctx->ht.is_40mhz) + iwlagn_config_ht40(conf, ctx); } else ctx->ht.is_40mhz = false; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index ed6283623932..4b2aa1da0953 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv, switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_TKIP: - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - if (sta) addr = sta->addr; else /* station mode case only */ @@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv, seq.tkip.iv32, p1k, CMD_SYNC); break; case WLAN_CIPHER_SUITE_CCMP: - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - /* fall through */ case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: ret = iwlagn_send_sta_key(priv, keyconf, sta_id, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index ccba69b7f8a7..bacc06c95e7a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /* fall through */ + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + default: + break; + } + /* * We could program these keys into the hardware as well, but we * don't expect much multicast traffic in IBSS and having keys @@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, /* Configure HT40 channels */ ctx->ht.enabled = conf_is_ht(conf); - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else + if (ctx->ht.enabled) + iwlagn_config_ht40(conf, ctx); + else ctx->ht.is_40mhz = false; if ((le16_to_cpu(ctx->staging.channel) != ch)) @@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); -module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO); +module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO); 
MODULE_PARM_DESC(wd_disable, - "Disable stuck queue watchdog timer (default: 0 [enabled])"); + "Disable stuck queue watchdog timer 0=system default, " + "1=disable, 2=enable (default: 0)"); /* * set bt_coex_active to true, uCode will do kill/defer diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index 5b936ec1a541..3856abaea507 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes); +void iwlagn_config_ht40(struct ieee80211_conf *conf, + struct iwl_rxon_context *ctx); /* uCode */ int iwlagn_rx_calib_result(struct iwl_priv *priv, diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 001fdf140abb..fcf54160e4ed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv) { unsigned int timeout = priv->cfg->base_params->wd_timeout; - if (timeout && !iwlagn_mod_params.wd_disable) - mod_timer(&priv->watchdog, - jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); - else - del_timer(&priv->watchdog); + if (!iwlagn_mod_params.wd_disable) { + /* use system default */ + if (timeout && !priv->cfg->base_params->wd_disable) + mod_timer(&priv->watchdog, + jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); + } else { + /* module parameter overwrite default configuration */ + if (timeout && iwlagn_mod_params.wd_disable == 2) + mod_timer(&priv->watchdog, + jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); + } } /** diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 137da3380704..f2fc288f3dd3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -113,6 +113,7 @@ struct iwl_lib_ops { * @shadow_reg_enable: HW shadhow register bit * @no_idle_support: do not support idle mode * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up + * wd_disable: disable watchdog timer */ struct iwl_base_params { int eeprom_size; @@ -134,6 +135,7 @@ struct iwl_base_params { const bool shadow_reg_enable; const bool no_idle_support; const bool hd_v2; + const bool wd_disable; }; /* * @advanced_bt_coexist: support advanced bt coexist diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h index 1f7a93c67c45..14eaf37ce3b1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-shared.h +++ b/drivers/net/wireless/iwlwifi/iwl-shared.h @@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params; * @restart_fw: restart firmware, default = 1 * @plcp_check: enable plcp health check, default = true * @ack_check: disable ack health check, default = false - * @wd_disable: enable stuck queue check, default = false + * @wd_disable: enable stuck queue check, default = 0 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 * @no_sleep_autoadjust: disable autoadjust, default = true @@ -141,7 +141,7 @@ struct iwl_mod_params { int restart_fw; bool plcp_check; bool ack_check; - bool wd_disable; + int wd_disable; bool bt_coex_active; int led_mode; bool no_sleep_autoadjust; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 
da3411057afc..ce918980e977 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c @@ -990,29 +990,16 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans) return 0; } -static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans) +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { unsigned long flags; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + /* tell the device to stop sending interrupts */ spin_lock_irqsave(&trans->shrd->lock, flags); iwl_disable_interrupts(trans); spin_unlock_irqrestore(&trans->shrd->lock, flags); - /* wait to make sure we flush pending tasklet*/ - synchronize_irq(bus(trans)->irq); - tasklet_kill(&trans_pcie->irq_tasklet); -} - -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) -{ - /* stop and reset the on-board processor */ - iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); - - /* tell the device to stop sending interrupts */ - iwl_trans_pcie_disable_sync_irq(trans); - /* device going down, Stop using ICT table */ iwl_disable_ict(trans); @@ -1039,6 +1026,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) /* Stop the device, and put it in low power state */ iwl_apm_stop(priv(trans)); + + /* Upon stop, the APM issues an interrupt if HW RF kill is set. + * Clean again the interrupt here + */ + spin_lock_irqsave(&trans->shrd->lock, flags); + iwl_disable_interrupts(trans); + spin_unlock_irqrestore(&trans->shrd->lock, flags); + + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(bus(trans)->irq); + tasklet_kill(&trans_pcie->irq_tasklet); + + /* stop and reset the on-board processor */ + iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); } static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 4fcd653bddc4..a7f1ab28940d 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -634,7 +634,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, if (channel && !(channel->flags & IEEE80211_CHAN_DISABLED)) cfg80211_inform_bss(wiphy, channel, - bssid, le64_to_cpu(*(__le64 *)tsfdesc), + bssid, get_unaligned_le64(tsfdesc), capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), GFP_KERNEL); diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 11b69b300dc0..728baa445259 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -995,6 +995,7 @@ static int if_spi_host_to_card(struct lbs_private *priv, spin_unlock_irqrestore(&card->buffer_lock, flags); break; default: + kfree(packet); netdev_err(priv->dev, "can't transfer buffer of type %d\n", type); err = -EINVAL; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index dae8dbb24a03..8d3ab378662b 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -819,8 +819,10 @@ mwifiex_scan_setup_scan_config(struct mwifiex_private *priv, wildcard_ssid_tlv->header.len = cpu_to_le16( (u16) (ssid_len + sizeof(wildcard_ssid_tlv-> max_ssid_length))); - wildcard_ssid_tlv->max_ssid_length = - user_scan_in->ssid_list[ssid_idx].max_len; + + /* max_ssid_length = 0 tells firmware to perform + specific scan for the SSID filled */ + wildcard_ssid_tlv->max_ssid_length = 0; 
memcpy(wildcard_ssid_tlv->ssid, user_scan_in->ssid_list[ssid_idx].ssid, @@ -1469,7 +1471,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, s32 rssi, const u8 *ie_buf, size_t ie_len, u16 beacon_period, u16 cap_info_bitmap, u8 band) { - struct mwifiex_bssdescriptor *bss_desc = NULL; + struct mwifiex_bssdescriptor *bss_desc; int ret; unsigned long flags; u8 *beacon_ie; @@ -1484,6 +1486,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); if (!beacon_ie) { + kfree(bss_desc); dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); return -ENOMEM; } diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index f18df82eeb92..78d0d6988553 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) WARN_ON(priv->fw_state != FW_STATE_READY); - cancel_work_sync(&priv->work); - p54spi_power_off(priv); spin_lock_irqsave(&priv->tx_lock, flags); INIT_LIST_HEAD(&priv->tx_pending); @@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev) priv->fw_state = FW_STATE_OFF; mutex_unlock(&priv->mutex); + + cancel_work_sync(&priv->work); } static int __devinit p54spi_probe(struct spi_device *spi) @@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) init_completion(&priv->fw_comp); INIT_LIST_HEAD(&priv->tx_pending); mutex_init(&priv->mutex); + spin_lock_init(&priv->tx_lock); SET_IEEE80211_DEV(hw, &spi->dev); priv->common.open = p54spi_op_start; priv->common.stop = p54spi_op_stop; diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index d97a2caf582b..bc2ba80c47bb 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, dwrq->flags = 0; dwrq->length = 0; } - essid->octets[essid->length] = '\0'; + essid->octets[dwrq->length] = '\0'; memcpy(extra, essid->octets, dwrq->length); kfree(essid); diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 3f183a15186e..1ba079dffb11 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) /* Apparently the data is read from end to start */ rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, ®); /* The returned value is in CPU order, but eeprom is le */ - rt2x00dev->eeprom[i] = cpu_to_le32(reg); + *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg); rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, ®); *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, ®); diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index f1565792f270..377876315b8d 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -919,6 +919,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x050d, 0x935b) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00e8) }, + { USB_DEVICE(0x0411, 0x0158) }, { USB_DEVICE(0x0411, 0x016f) }, { USB_DEVICE(0x0411, 0x01a2) }, /* Corega */ diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 2ec5c00235e6..99ff12d0c29d 100644 --- 
a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -943,6 +943,7 @@ struct rt2x00_dev { * Powersaving work */ struct delayed_work autowakeup_work; + struct work_struct sleep_work; /* * Data queue arrays for RX, TX, Beacon and ATIM. diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index e1fb2a8569be..edd317fa7c0a 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -465,6 +465,23 @@ static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie) return NULL; } +static void rt2x00lib_sleep(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, sleep_work); + + if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) + return; + + /* + * Check again is powersaving is enabled, to prevent races from delayed + * work execution. + */ + if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) + rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, + IEEE80211_CONF_CHANGE_PS); +} + static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) @@ -512,8 +529,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, cam |= (tim_ie->bitmap_ctrl & 0x01); if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) - rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, - IEEE80211_CONF_CHANGE_PS); + queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work); } static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, @@ -1141,6 +1157,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); + INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep); /* * Let the driver probe the device to detect the capabilities. @@ -1197,6 +1214,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) */ cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); + cancel_work_sync(&rt2x00dev->sleep_work); if (rt2x00_is_usb(rt2x00dev)) { del_timer_sync(&rt2x00dev->txstatus_timer); cancel_work_sync(&rt2x00dev->rxdone_work); diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index db5262844543..55c8e50f45fd 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) if (mac->link_state != MAC80211_LINKED) return; - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irq(&rtlpriv->locks.lps_lock); /* Idle for a while if we connect to AP a while ago. 
*/ if (mac->cnt_after_linked >= 2) { @@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) } } - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irq(&rtlpriv->locks.lps_lock); } /*Leave the leisure power save mode.*/ @@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + unsigned long flags; - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags); if (ppsc->fwctrl_lps) { if (ppsc->dot11_psmode != EACTIVE) { @@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) rtl_lps_set_psmode(hw, EACTIVE); } } - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags); } /* For sw LPS*/ @@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); } - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irq(&rtlpriv->locks.lps_lock); rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irq(&rtlpriv->locks.lps_lock); } void rtl_swlps_rfon_wq_callback(void *data) @@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) if (rtlpriv->link_info.busytraffic) return; - spin_lock(&rtlpriv->locks.lps_lock); + spin_lock_irq(&rtlpriv->locks.lps_lock); rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); - spin_unlock(&rtlpriv->locks.lps_lock); + spin_unlock_irq(&rtlpriv->locks.lps_lock); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c index 128ccb79318c..fc29c671cf3b 100644 --- a/drivers/net/wireless/wl12xx/scan.c +++ b/drivers/net/wireless/wl12xx/scan.c @@ -559,7 +559,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl, break; } /* Fail if SSID isn't present in the filters */ - if (j == req->n_ssids) { + if (j == cmd->n_ssids) { ret = -EINVAL; goto out_free; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 0cb594c86090..15e332d08c8d 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, pending_idx = *((u16 *)skb->data); xen_netbk_idx_release(netbk, pending_idx); for (j = start; j < i; j++) { - pending_idx = frag_get_pending_idx(&shinfo->frags[i]); + pending_idx = frag_get_pending_idx(&shinfo->frags[j]); xen_netbk_idx_release(netbk, pending_idx); } @@ -1668,7 +1668,7 @@ static int __init netback_init(void) "netback/%u", group); if (IS_ERR(netbk->task)) { - printk(KERN_ALERT "kthread_run() fails at netback\n"); + printk(KERN_ALERT "kthread_create() fails at netback\n"); del_timer(&netbk->net_timer); rc = PTR_ERR(netbk->task); goto failed_init; diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 6d3dd3988d0f..0f0cfa3bca30 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -26,11 +26,6 @@ #include <linux/string.h> #include <linux/slab.h> -/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */ -#ifndef NO_IRQ -#define NO_IRQ 0 -#endif - /** * irq_of_parse_and_map - Parse and map an interrupt into linux virq space * @device: Device node of the device whose interrupt is to be mapped @@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index) struct of_irq oirq; if (of_irq_map_one(dev, index, &oirq)) - return NO_IRQ; + return 
0; return irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); @@ -60,27 +55,27 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map); */ struct device_node *of_irq_find_parent(struct device_node *child) { - struct device_node *p, *c = child; + struct device_node *p; const __be32 *parp; - if (!of_node_get(c)) + if (!of_node_get(child)) return NULL; do { - parp = of_get_property(c, "interrupt-parent", NULL); + parp = of_get_property(child, "interrupt-parent", NULL); if (parp == NULL) - p = of_get_parent(c); + p = of_get_parent(child); else { if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) p = of_node_get(of_irq_dflt_pic); else p = of_find_node_by_phandle(be32_to_cpup(parp)); } - of_node_put(c); - c = p; + of_node_put(child); + child = p; } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); - return (p == child) ? NULL : p; + return p; } /** @@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) /* Only dereference the resource if both the * resource and the irq are valid. */ - if (r && irq != NO_IRQ) { + if (r && irq) { r->start = r->end = irq; r->flags = IORESOURCE_IRQ; r->name = dev->full_name; @@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev) { int nr = 0; - while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ) + while (of_irq_to_resource(dev, nr, NULL)) nr++; return nr; @@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int i; for (i = 0; i < nr_irqs; i++, res++) - if (of_irq_to_resource(dev, i, res) == NO_IRQ) + if (!of_irq_to_resource(dev, i, res)) break; return i; @@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches) desc->dev = np; desc->interrupt_parent = of_irq_find_parent(np); + if (desc->interrupt_parent == np) + desc->interrupt_parent = NULL; list_add_tail(&desc->list, &intc_desc_list); } diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index dccd8636095c..f8c752e408a6 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val) return err; } +static int timer_mode; + static int __init oprofile_init(void) { int err; + /* always init architecture to setup backtrace support */ err = oprofile_arch_init(&oprofile_ops); - if (err < 0 || timer) { - printk(KERN_INFO "oprofile: using timer interrupt.\n"); + + timer_mode = err || timer; /* fall back to timer mode on errors */ + if (timer_mode) { + if (!err) + oprofile_arch_exit(); err = oprofile_timer_init(&oprofile_ops); if (err) return err; } - return oprofilefs_register(); + + err = oprofilefs_register(); + if (!err) + return 0; + + /* failed */ + if (timer_mode) + oprofile_timer_exit(); + else + oprofile_arch_exit(); + + return err; } static void __exit oprofile_exit(void) { - oprofile_timer_exit(); oprofilefs_unregister(); - oprofile_arch_exit(); + if (timer_mode) + oprofile_timer_exit(); + else + oprofile_arch_exit(); } diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index 3ef44624f510..878fba126582 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c @@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops) ops->start = oprofile_hrtimer_start; ops->stop = oprofile_hrtimer_stop; ops->cpu_type = "timer"; + printk(KERN_INFO "oprofile: using timer interrupt.\n"); return 0; } diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index b6f9749b4fa7..f02b5235056d 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ 
-76,6 +76,7 @@ config PCI_IOV config PCI_PRI bool "PCI PRI support" + depends on PCI select PCI_ATS help PRI is the PCI Page Request Interface. It allows PCI devices that are diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 596172b4ae95..fce1c54a0c8d 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -459,8 +459,17 @@ static int add_bridge(acpi_handle handle) { acpi_status status; unsigned long long tmp; + struct acpi_pci_root *root; acpi_handle dummy_handle; + /* + * We shouldn't use this bridge if PCIe native hotplug control has been + * granted by the BIOS for it. + */ + root = acpi_pci_find_root(handle); + if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)) + return -ENODEV; + /* if the bridge doesn't have _STA, we assume it is always there */ status = acpi_get_handle(handle, "_STA", &dummy_handle); if (ACPI_SUCCESS(status)) { @@ -1376,13 +1385,23 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, static acpi_status find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) { + struct acpi_pci_root *root; int *count = (int *)context; - if (acpi_is_root_bridge(handle)) { - acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, - handle_hotplug_event_bridge, NULL); - (*count)++; - } + if (!acpi_is_root_bridge(handle)) + return AE_OK; + + root = acpi_pci_find_root(handle); + if (!root) + return AE_OK; + + if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) + return AE_OK; + + (*count)++; + acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, + handle_hotplug_event_bridge, NULL); + return AE_OK ; } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 1e9c9aacc3a6..085dbb5fc168 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -213,9 +213,6 @@ static int board_added(struct slot *p_slot) goto err_exit; } - /* Wait for 1 second after checking link training status */ - msleep(1000); - /* Check for a power fault */ if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 96dc4734e4af..7b1414810ae3 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -280,6 +280,14 @@ int pciehp_check_link_status(struct controller *ctrl) else msleep(1000); + /* + * Need to wait for 1000 ms after Data Link Layer Link Active + * (DLLLA) bit reads 1b before sending configuration request. + * We need it before checking Link Training (LT) bit becuase + * LT is still set even after DLLLA bit is set on some platform. + */ + msleep(1000); + retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); if (retval) { ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); @@ -294,6 +302,16 @@ int pciehp_check_link_status(struct controller *ctrl) return retval; } + /* + * If the port supports Link speeds greater than 5.0 GT/s, we + * must wait for 100 ms after Link training completes before + * sending configuration request. 
+ */ + if (ctrl->pcie->port->subordinate->max_bus_speed > PCIE_SPEED_5_0GT) + msleep(100); + + pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); + return retval; } @@ -484,7 +502,6 @@ int pciehp_power_on_slot(struct slot * slot) u16 slot_cmd; u16 cmd_mask; u16 slot_status; - u16 lnk_status; int retval = 0; /* Clear sticky power-fault bit from previous power failures */ @@ -516,14 +533,6 @@ int pciehp_power_on_slot(struct slot * slot) ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); - retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status); - if (retval) { - ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n", - __func__); - return retval; - } - pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status); - return retval; } diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index aca972bbfb4c..dd7e0c51a33e 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) static int is_shpc_capable(struct pci_dev *dev) { - if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == - PCI_DEVICE_ID_AMD_GOLAM_7450)) + if (dev->vendor == PCI_VENDOR_ID_AMD && + dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) return 1; if (!pci_find_capability(dev, PCI_CAP_ID_SHPC)) return 0; diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 36547f0ce305..75ba2311b54f 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ ctrl_dbg(ctrl, "Hotplug Controller:\n"); - if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == - PCI_DEVICE_ID_AMD_GOLAM_7450)) { + if (pdev->vendor == PCI_VENDOR_ID_AMD && + pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) { /* amd shpc driver doesn't use Base Offset; assume 0 */ ctrl->mmio_base = pci_resource_start(pdev, 0); ctrl->mmio_size = pci_resource_len(pdev, 0); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index ef566443f945..e17e2f8001d2 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -2,23 +2,17 @@ # PINCTRL infrastructure and drivers # -menuconfig PINCTRL - bool "PINCTRL Support" +config PINCTRL + bool depends on EXPERIMENTAL - help - This enables the PINCTRL subsystem for controlling pins - on chip packages, for example multiplexing pins on primarily - PGA and BGA packages for systems on chip. - - If unsure, say N. if PINCTRL +menu "Pin controllers" + depends on PINCTRL + config PINMUX bool "Support pinmux controllers" - help - Say Y here if you want the pincontrol subsystem to handle pin - multiplexing drivers. 
config DEBUG_PINCTRL bool "Debug PINCTRL calls" @@ -30,14 +24,12 @@ config PINMUX_SIRF bool "CSR SiRFprimaII pinmux driver" depends on ARCH_PRIMA2 select PINMUX - help - Say Y here to enable the SiRFprimaII pinmux driver config PINMUX_U300 bool "U300 pinmux driver" depends on ARCH_U300 select PINMUX - help - Say Y here to enable the U300 pinmux driver + +endmenu endif diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index f4e3d82379d7..7f43cf86d776 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -83,8 +83,10 @@ config DELL_LAPTOP depends on EXPERIMENTAL depends on BACKLIGHT_CLASS_DEVICE depends on RFKILL || RFKILL = n - depends on POWER_SUPPLY depends on SERIO_I8042 + select POWER_SUPPLY + select LEDS_CLASS + select NEW_LEDS default n ---help--- This driver adds support for rfkill and backlight control to Dell diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index a43cfd906c6d..d93e962f2610 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -589,14 +589,14 @@ static const struct backlight_ops dell_ops = { .update_status = dell_send_intensity, }; -static void touchpad_led_on() +static void touchpad_led_on(void) { int command = 0x97; char data = 1; i8042_command(&data, command | 1 << 12); } -static void touchpad_led_off() +static void touchpad_led_off(void) { int command = 0x97; char data = 2; diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 13ef8c37471d..dcdc1f4a4624 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -121,6 +121,7 @@ struct toshiba_acpi_dev { int illumination_supported:1; int video_supported:1; int fan_supported:1; + int system_event_supported:1; struct mutex mutex; }; @@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v) u32 hci_result; u32 value; - if (!dev->key_event_valid) { + if (!dev->key_event_valid && dev->system_event_supported) { hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); if (hci_result == HCI_SUCCESS) { dev->key_event_valid = 1; @@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev) /* enable event fifo */ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); + if (hci_result == HCI_SUCCESS) + dev->system_event_supported = 1; props.type = BACKLIGHT_PLATFORM; props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; @@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) { struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); u32 hci_result, value; + int retries = 3; - if (event != 0x80) + if (!dev->system_event_supported || event != 0x80) return; + do { hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result); - if (hci_result == HCI_SUCCESS) { + switch (hci_result) { + case HCI_SUCCESS: if (value == 0x100) continue; /* act on key press; ignore key release */ @@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) pr_info("Unknown key %x\n", value); } - } else if (hci_result == HCI_NOT_SUPPORTED) { + break; + case HCI_NOT_SUPPORTED: /* This is a workaround for an unresolved issue on * some machines where system events sporadically * become disabled. 
*/ hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result); pr_notice("Re-enabled hotkeys\n"); + /* fall through */ + default: + retries--; + break; } - } while (hci_result != HCI_EMPTY); + } while (retries && hci_result != HCI_EMPTY); } diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c index cffcb7c00b00..01fa671ec97f 100644 --- a/drivers/power/intel_mid_battery.c +++ b/drivers/power/intel_mid_battery.c @@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages."); #define PMIC_BATT_CHR_SBATDET_MASK (1 << 5) #define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6) #define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7) -#define PMIC_BATT_CHR_EXCPT_MASK 0xC6 +#define PMIC_BATT_CHR_EXCPT_MASK 0x86 + #define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31) #define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF @@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi) pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT); batt_exception = 1; - } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) { - pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; - pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; - pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT); - batt_exception = 1; } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) { pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; @@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi) batt_exception = 1; } else { pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD; + if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) { + /* PMIC will change charging current automatically */ + pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT); + } } } diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c index d9fb729535a1..fb7300837fee 100644 --- a/drivers/ps3/ps3-vuart.c +++ b/drivers/ps3/ps3-vuart.c @@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void) } result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler, - IRQF_DISABLED, "vuart", &vuart_bus_priv); + 0, "vuart", &vuart_bus_priv); if (result) { pr_debug("%s:%d: request_irq failed (%d)\n", diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c index cc328dec946b..8c3f5adf1bc6 100644 --- a/drivers/ps3/ps3stor_lib.c +++ b/drivers/ps3/ps3stor_lib.c @@ -167,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler) goto fail_close_device; } - error = request_irq(dev->irq, handler, IRQF_DISABLED, + error = request_irq(dev->irq, handler, 0, dev->sbd.core.driver->name, dev); if (error) { dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n", diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index cf3f9997546d..10451a15e828 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm) static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) { - return 1; /* always round timer functions to one nanosecond */ + tp->tv_sec = 0; + tp->tv_nsec = 1; + return 0; } static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 5225930a10cd..691b1ab1a3d0 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv) INIT_WORK(&priv->idb_work, tsi721_db_dpc); /* Allocate buffer for inbound doorbells queue */ - priv->idb_base = 
dma_alloc_coherent(&priv->pdev->dev, + priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, &priv->idb_dma, GFP_KERNEL); if (!priv->idb_base) return -ENOMEM; - memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE); - dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", priv->idb_base, (unsigned long long)priv->idb_dma); @@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) */ /* Allocate space for DMA descriptors */ - bd_ptr = dma_alloc_coherent(&priv->pdev->dev, + bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, bd_num * sizeof(struct tsi721_dma_desc), &bd_phys, GFP_KERNEL); if (!bd_ptr) @@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) priv->bdma[chnum].bd_phys = bd_phys; priv->bdma[chnum].bd_base = bd_ptr; - memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc)); - dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", bd_ptr, (unsigned long long)bd_phys); @@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? bd_num : TSI721_DMA_MINSTSSZ; sts_size = roundup_pow_of_two(sts_size); - sts_ptr = dma_alloc_coherent(&priv->pdev->dev, + sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, sts_size * sizeof(struct tsi721_dma_sts), &sts_phys, GFP_KERNEL); if (!sts_ptr) { @@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) priv->bdma[chnum].sts_base = sts_ptr; priv->bdma[chnum].sts_size = sts_size; - memset(sts_ptr, 0, sts_size); - dev_dbg(&priv->pdev->dev, "desc status FIFO @ %p (phys = %llx) size=0x%x\n", sts_ptr, (unsigned long long)sts_phys, sts_size); @@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, /* Outbound message descriptor status FIFO allocation */ priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); - priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); @@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, goto out_desc; } - memset(priv->omsg_ring[mbox].sts_base, 0, - entries * sizeof(struct tsi721_dma_sts)); - /* * Configure Outbound Messaging Engine */ @@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv) INIT_LIST_HEAD(&mport->dbells); rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); - rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0); - rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); + rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); + rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); strcpy(mport->name, "Tsi721 mport"); /* Hook up interrupt handler */ @@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int i; + int i, cap; int err; u32 regval; @@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); } - /* Clear "no snoop" and "relaxed ordering" bits. 
*/ - pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval); - regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN); - pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval); + cap = pci_pcie_cap(pdev); + BUG_ON(cap == 0); + + /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ + pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval); + regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN); + regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; + pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval); + + /* Adjust PCIe completion timeout. */ + pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval); + regval &= ~(0x0f); + pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2); /* * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 58be4deb1402..822e54c394d5 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -72,6 +72,8 @@ #define TSI721_MSIXPBA_OFFSET 0x2a000 #define TSI721_PCIECFG_EPCTL 0x400 +#define MAX_READ_REQUEST_SZ_SHIFT 12 + /* * Event Management Registers */ diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c index 5abeb3ac3e8d..298c6c6a2795 100644 --- a/drivers/regulator/aat2870-regulator.c +++ b/drivers/regulator/aat2870-regulator.c @@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id) break; } - if (!ri) + if (i == ARRAY_SIZE(aat2870_regulators)) return NULL; ri->enable_addr = AAT2870_LDO_EN; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 669d02160221..938398f3e869 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev) list_del(&rdev->list); if (rdev->supply) regulator_put(rdev->supply); - device_unregister(&rdev->dev); kfree(rdev->constraints); + device_unregister(&rdev->dev); mutex_unlock(&regulator_list_mutex); } EXPORT_SYMBOL_GPL(regulator_unregister); diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 66d2d60b436a..b552aae55b41 100644 --- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -664,10 +664,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev, switch (id) { case TPS65910_REG_VDD1: - dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; + dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; if (dcdc_mult == 1) dcdc_mult--; - vsel = (selector % VDD1_2_NUM_VOLTS) + 3; + vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3; tps65910_modify_bits(pmic, TPS65910_VDD1, (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), @@ -675,10 +675,10 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev, tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); break; case TPS65910_REG_VDD2: - dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; + dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; if (dcdc_mult == 1) dcdc_mult--; - vsel = (selector % VDD1_2_NUM_VOLTS) + 3; + vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3; tps65910_modify_bits(pmic, TPS65910_VDD2, (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), @@ -756,9 +756,9 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev, switch (id) { case TPS65910_REG_VDD1: case TPS65910_REG_VDD2: - mult = (selector / VDD1_2_NUM_VOLTS) + 1; + mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; volt = VDD1_2_MIN_VOLT + - (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; + (selector % 
VDD1_2_NUM_VOLT_FINE) * VDD1_2_OFFSET; break; case TPS65911_REG_VDDCTRL: volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); @@ -947,6 +947,8 @@ static __devinit int tps65910_probe(struct platform_device *pdev) if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { pmic->desc[i].ops = &tps65910_ops_dcdc; + pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE * + VDD1_2_NUM_VOLT_COARSE; } else if (i == TPS65910_REG_VDD3) { if (tps65910_chip_id(tps65910) == TPS65910) pmic->desc[i].ops = &tps65910_ops_vdd3; diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index ee8747f4fa08..11cc308d66e9 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -71,6 +71,7 @@ struct twlreg_info { #define VREG_TYPE 1 #define VREG_REMAP 2 #define VREG_DEDICATED 3 /* LDO control */ +#define VREG_VOLTAGE_SMPS_4030 9 /* TWL6030 register offsets */ #define VREG_TRANS 1 #define VREG_STATE 2 @@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = { .get_status = twl4030reg_get_status, }; +static int +twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, + unsigned *selector) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = DIV_ROUND_UP(min_uV - 600000, 12500); + + twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030, + vsel); + return 0; +} + +static int twl4030smps_get_voltage(struct regulator_dev *rdev) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, + VREG_VOLTAGE_SMPS_4030); + + return vsel * 12500 + 600000; +} + +static struct regulator_ops twl4030smps_ops = { + .set_voltage = twl4030smps_set_voltage, + .get_voltage = twl4030smps_get_voltage, +}; + static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) { struct twlreg_info *info = rdev_get_drvdata(rdev); @@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = { }, \ } +#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \ + { \ + .base = offset, \ + .id = num, \ + .delay = turnon_delay, \ + .remap = remap_conf, \ + .desc = { \ + .name = #label, \ + .id = TWL4030_REG_##label, \ + .ops = &twl4030smps_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + }, \ + } + #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ .base = offset, \ .min_mV = min_mVolts, \ @@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = { TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08), TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08), TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08), - TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08), - TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08), + TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08), + TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08), TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08), TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08), TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08), diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index e8326f26fa2f..dc4c2748bbc3 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg) */ delta = timespec_sub(old_system, old_rtc); delta_delta = timespec_sub(delta, old_delta); - if (abs(delta_delta.tv_sec) >= 2) { + if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) { /* * if delta_delta is too large, assume time correction * has occured and set old_delta to the current delta. 
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev) rtc_tm_to_time(&tm, &new_rtc.tv_sec); new_rtc.tv_nsec = 0; - if (new_rtc.tv_sec <= old_rtc.tv_sec) { - if (new_rtc.tv_sec < old_rtc.tv_sec) - pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); + if (new_rtc.tv_sec < old_rtc.tv_sec) { + pr_debug("%s: time travel!\n", dev_name(&rtc->dev)); return 0; } @@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev) sleep_time = timespec_sub(sleep_time, timespec_sub(new_system, old_system)); - timekeeping_inject_sleeptime(&sleep_time); + if (sleep_time.tv_sec >= 0) + timekeeping_inject_sleeptime(&sleep_time); return 0; } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8e286259a007..fa4d9f324189 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_read_alarm); +static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) +{ + int err; + + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->set_alarm) + err = -EINVAL; + else + err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + + return err; +} + static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { struct rtc_time tm; @@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * over right here, before we set the alarm. */ - if (!rtc->ops) - err = -ENODEV; - else if (!rtc->ops->set_alarm) - err = -EINVAL; - else - err = rtc->ops->set_alarm(rtc->dev.parent, alarm); - - return err; + return ___rtc_set_alarm(rtc, alarm); } int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) @@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) return 0; } +static void rtc_alarm_disable(struct rtc_device *rtc) +{ + struct rtc_wkalrm alarm; + struct rtc_time tm; + + __rtc_read_time(rtc, &tm); + + alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm), + ktime_set(300, 0))); + alarm.enabled = 0; + + ___rtc_set_alarm(rtc, &alarm); +} + /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc rtc device @@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) struct rtc_wkalrm alarm; int err; next = timerqueue_getnext(&rtc->timerqueue); - if (!next) + if (!next) { + rtc_alarm_disable(rtc); return; + } alarm.time = rtc_ktime_to_tm(next->expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); @@ -847,7 +870,8 @@ again: err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) goto again; - } + } else + rtc_alarm_disable(rtc); mutex_unlock(&rtc->ops_lock); } diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index d33544802a2e..bb21f443fb70 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c @@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void) /* * rtc_time's year contains the increment over 1900, but vRTC's YEAR * register can't be programmed to value larger than 0x64, so vRTC - * driver chose to use 1960 (1970 is UNIX time start point) as the base, + * driver chose to use 1972 (1970 is UNIX time start point) as the base, * and does the translation at read/write time. * - * Why not just use 1970 as the offset? it's because using 1960 will + * Why not just use 1970 as the offset? it's because using 1972 will * make it consistent in leap year setting for both vrtc and low-level - * physical rtc devices. + * physical rtc devices. 
Then why not use 1960 as the offset? If we use + * 1960, for a device's first use, its YEAR register is 0 and the system + * year will be parsed as 1960 which is not a valid UNIX time and will + * cause many applications to fail mysteriously. */ static int mrst_read_time(struct device *dev, struct rtc_time *time) { @@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time) time->tm_year = vrtc_cmos_read(RTC_YEAR); spin_unlock_irqrestore(&rtc_lock, flags); - /* Adjust for the 1960/1900 */ - time->tm_year += 60; + /* Adjust for the 1972/1900 */ + time->tm_year += 72; time->tm_mon--; - return RTC_24H; + return rtc_valid_tm(time); } static int mrst_set_time(struct device *dev, struct rtc_time *time) @@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time) min = time->tm_min; sec = time->tm_sec; - if (yrs < 70 || yrs > 138) + if (yrs < 72 || yrs > 138) return -EINVAL; - yrs -= 60; + yrs -= 72; spin_lock_irqsave(&rtc_lock, flags); diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c index b3eba3cddd42..e4b6880aabd0 100644 --- a/drivers/rtc/rtc-puv3.c +++ b/drivers/rtc/rtc-puv3.c @@ -220,7 +220,7 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en) } } -static int puv3_rtc_remove(struct platform_device *dev) +static int __devexit puv3_rtc_remove(struct platform_device *dev) { struct rtc_device *rtc = platform_get_drvdata(dev); @@ -236,7 +236,7 @@ static int puv3_rtc_remove(struct platform_device *dev) return 0; } -static int puv3_rtc_probe(struct platform_device *pdev) +static int __devinit puv3_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct resource *res; diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 7639ab906f02..5b979d9cc332 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) void __iomem *base = s3c_rtc_base; int year = tm->tm_year - 100; - clk_enable(rtc_clk); pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n", 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); @@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) return -EINVAL; } + clk_enable(rtc_clk); writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC); writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN); writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR); diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 43068fbd0baa..1b6d9247fdc7 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -641,6 +641,8 @@ static int __init zcore_init(void) if (ipl_info.type != IPL_TYPE_FCP_DUMP) return -ENODATA; + if (OLDMEM_BASE) + return -ENODATA; zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); debug_register_view(zcore_dbf, &debug_sprintf_view); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 75c3f1f8fd43..a84631a7391d 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) int chsc_chp_vary(struct chp_id chpid, int on) { struct channel_path *chp = chpid_to_chp(chpid); - struct chp_link link; - memset(&link, 0, sizeof(struct chp_link)); - link.chpid = chpid; /* Wait until previous actions have settled. */ css_wait_for_slow_path(); /* @@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on) /* Try to update the channel path descritor. 
*/ chsc_determine_base_channel_path_desc(chpid, &chp->desc); for_each_subchannel_staged(s390_subchannel_vary_chpid_on, - __s390_vary_chpid_on, &link); + __s390_vary_chpid_on, &chpid); } else for_each_subchannel_staged(s390_subchannel_vary_chpid_off, - NULL, &link); + NULL, &chpid); return 0; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 155a82bcb9e5..4a1ff5c2eb88 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -68,8 +68,13 @@ struct schib { __u8 mda[4]; /* model dependent area */ } __attribute__ ((packed,aligned(4))); +/* + * When rescheduled, todo's with higher values will overwrite those + * with lower values. + */ enum sch_todo { SCH_TODO_NOTHING, + SCH_TODO_EVAL, SCH_TODO_UNREG, }; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 92d7324acb1c..21908e67bf67 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch) } EXPORT_SYMBOL_GPL(css_sch_device_unregister); -static void css_sch_todo(struct work_struct *work) -{ - struct subchannel *sch; - enum sch_todo todo; - - sch = container_of(work, struct subchannel, todo_work); - /* Find out todo. */ - spin_lock_irq(sch->lock); - todo = sch->todo; - CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, - sch->schid.sch_no, todo); - sch->todo = SCH_TODO_NOTHING; - spin_unlock_irq(sch->lock); - /* Perform todo. */ - if (todo == SCH_TODO_UNREG) - css_sch_device_unregister(sch); - /* Release workqueue ref. */ - put_device(&sch->dev); -} - -/** - * css_sched_sch_todo - schedule a subchannel operation - * @sch: subchannel - * @todo: todo - * - * Schedule the operation identified by @todo to be performed on the slow path - * workqueue. Do nothing if another operation with higher priority is already - * scheduled. Needs to be called with subchannel lock held. - */ -void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) -{ - CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", - sch->schid.ssid, sch->schid.sch_no, todo); - if (sch->todo >= todo) - return; - /* Get workqueue ref. */ - if (!get_device(&sch->dev)) - return; - sch->todo = todo; - if (!queue_work(cio_work_q, &sch->todo_work)) { - /* Already queued, release workqueue ref. */ - put_device(&sch->dev); - } -} - static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) { int i; @@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow) css_schedule_eval(schid); } +/** + * css_sched_sch_todo - schedule a subchannel operation + * @sch: subchannel + * @todo: todo + * + * Schedule the operation identified by @todo to be performed on the slow path + * workqueue. Do nothing if another operation with higher priority is already + * scheduled. Needs to be called with subchannel lock held. + */ +void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo) +{ + CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n", + sch->schid.ssid, sch->schid.sch_no, todo); + if (sch->todo >= todo) + return; + /* Get workqueue ref. */ + if (!get_device(&sch->dev)) + return; + sch->todo = todo; + if (!queue_work(cio_work_q, &sch->todo_work)) { + /* Already queued, release workqueue ref. */ + put_device(&sch->dev); + } +} + +static void css_sch_todo(struct work_struct *work) +{ + struct subchannel *sch; + enum sch_todo todo; + int ret; + + sch = container_of(work, struct subchannel, todo_work); + /* Find out todo. 
*/ + spin_lock_irq(sch->lock); + todo = sch->todo; + CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid, + sch->schid.sch_no, todo); + sch->todo = SCH_TODO_NOTHING; + spin_unlock_irq(sch->lock); + /* Perform todo. */ + switch (todo) { + case SCH_TODO_NOTHING: + break; + case SCH_TODO_EVAL: + ret = css_evaluate_known_subchannel(sch, 1); + if (ret == -EAGAIN) { + spin_lock_irq(sch->lock); + css_sched_sch_todo(sch, todo); + spin_unlock_irq(sch->lock); + } + break; + case SCH_TODO_UNREG: + css_sch_device_unregister(sch); + break; + } + /* Release workqueue ref. */ + put_device(&sch->dev); +} + static struct idset *slow_subchannel_set; static spinlock_t slow_subchannel_lock; static wait_queue_head_t css_eval_wq; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index d734f4a0ecac..47269858ecb6 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev) */ cdev->private->flags.resuming = 1; cdev->private->path_new_mask = LPM_ANYPATH; - css_schedule_eval(sch->schid); + css_sched_sch_todo(sch, SCH_TODO_EVAL); spin_unlock_irq(sch->lock); - css_complete_work(); + css_wait_for_slow_path(); /* cdev may have been moved to a different subchannel. */ sch = to_subchannel(cdev->dev.parent); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 52c233fa2b12..1b853513c891 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev) cdev->private->pgid_reset_mask = 0; } -void -ccw_device_verify_done(struct ccw_device *cdev, int err) +static void create_fake_irb(struct irb *irb, int type) +{ + memset(irb, 0, sizeof(*irb)); + if (type == FAKE_CMD_IRB) { + struct cmd_scsw *scsw = &irb->scsw.cmd; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } else if (type == FAKE_TM_IRB) { + struct tm_scsw *scsw = &irb->scsw.tm; + scsw->x = 1; + scsw->cc = 1; + scsw->fctl = SCSW_FCTL_START_FUNC; + scsw->actl = SCSW_ACTL_START_PEND; + scsw->stctl = SCSW_STCTL_STATUS_PEND; + } +} + +void ccw_device_verify_done(struct ccw_device *cdev, int err) { struct subchannel *sch; @@ -520,12 +538,8 @@ callback: ccw_device_done(cdev, DEV_STATE_ONLINE); /* Deliver fake irb to device driver, if needed. */ if (cdev->private->flags.fake_irb) { - memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw.cmd.cc = 1; - cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; - cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; - cdev->private->irb.scsw.cmd.stctl = - SCSW_STCTL_STATUS_PEND; + create_fake_irb(&cdev->private->irb, + cdev->private->flags.fake_irb); cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index f98698d5735e..ec7fb6d3b479 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, if (cdev->private->state == DEV_STATE_VERIFY) { /* Remember to fake irb when finished. 
*/ if (!cdev->private->flags.fake_irb) { - cdev->private->flags.fake_irb = 1; + cdev->private->flags.fake_irb = FAKE_CMD_IRB; cdev->private->intparm = intparm; return 0; } else @@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ret = cio_set_options (sch, flags); if (ret) return ret; - /* Adjust requested path mask to excluded varied off paths. */ + /* Adjust requested path mask to exclude unusable paths. */ if (lpm) { - lpm &= sch->opm; + lpm &= sch->lpm; if (lpm == 0) return -EACCES; } @@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, sch = to_subchannel(cdev->dev.parent); if (!sch->schib.pmcw.ena) return -EINVAL; + if (cdev->private->state == DEV_STATE_VERIFY) { + /* Remember to fake irb when finished. */ + if (!cdev->private->flags.fake_irb) { + cdev->private->flags.fake_irb = FAKE_TM_IRB; + cdev->private->intparm = intparm; + return 0; + } else + /* There's already a fake I/O around. */ + return -EBUSY; + } if (cdev->private->state != DEV_STATE_ONLINE) return -EIO; - /* Adjust requested path mask to excluded varied off paths. */ + /* Adjust requested path mask to exclude unusable paths. */ if (lpm) { - lpm &= sch->opm; + lpm &= sch->lpm; if (lpm == 0) return -EACCES; } diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 2ebb492a5c17..76253dfcc1be 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -111,6 +111,9 @@ enum cdev_todo { CDEV_TODO_UNREG_EVAL, }; +#define FAKE_CMD_IRB 1 +#define FAKE_TM_IRB 2 + struct ccw_device_private { struct ccw_device *cdev; struct subchannel *sch; @@ -138,7 +141,7 @@ struct ccw_device_private { unsigned int doverify:1; /* delayed path verification */ unsigned int donotify:1; /* call notify function */ unsigned int recog_done:1; /* dev. recog. complete */ - unsigned int fake_irb:1; /* deliver faked irb */ + unsigned int fake_irb:2; /* deliver faked irb */ unsigned int resuming:1; /* recognition while resume */ unsigned int pgroup:1; /* pathgroup is set up */ unsigned int mpath:1; /* multipathing is set up */ diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index b77ae519d79c..96bbe9d12a79 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1271,18 +1271,16 @@ ap_config_timeout(unsigned long ptr) } /** - * ap_schedule_poll_timer(): Schedule poll timer. + * __ap_schedule_poll_timer(): Schedule poll timer. * * Set up the timer to run the poll tasklet */ -static inline void ap_schedule_poll_timer(void) +static inline void __ap_schedule_poll_timer(void) { ktime_t hr_time; spin_lock_bh(&ap_poll_timer_lock); - if (ap_using_interrupts() || ap_suspend_flag) - goto out; - if (hrtimer_is_queued(&ap_poll_timer)) + if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag) goto out; if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { hr_time = ktime_set(0, poll_timeout); @@ -1294,6 +1292,18 @@ out: } /** + * ap_schedule_poll_timer(): Schedule poll timer. + * + * Set up the timer to run the poll tasklet + */ +static inline void ap_schedule_poll_timer(void) +{ + if (ap_using_interrupts()) + return; + __ap_schedule_poll_timer(); +} + +/** * ap_poll_read(): Receive pending reply messages from an AP device. 
* @ap_dev: pointer to the AP device * @flags: pointer to control flags, bit 2^0 is set if another poll is @@ -1374,8 +1384,9 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) *flags |= 1; *flags |= 2; break; - case AP_RESPONSE_Q_FULL: case AP_RESPONSE_RESET_IN_PROGRESS: + __ap_schedule_poll_timer(); + case AP_RESPONSE_Q_FULL: *flags |= 2; break; case AP_RESPONSE_MESSAGE_TOO_BIG: @@ -1541,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev) rc = ap_init_queue(ap_dev->qid); if (rc == -ENODEV) ap_dev->unregistered = 1; + else + __ap_schedule_poll_timer(); } static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index fa80ba1f0344..9b66d2d1809b 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -4,7 +4,7 @@ menu "S/390 network device drivers" config LCS def_tristate m prompt "Lan Channel Station Interface" - depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI) + depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI) help Select this option if you want to use LCS networking on IBM System z. This device driver supports Token Ring (IEEE 802.5), diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index c28713da1ec5..863fc2197155 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -50,7 +50,7 @@ #include "lcs.h" -#if !defined(CONFIG_NET_ETHERNET) && \ +#if !defined(CONFIG_ETHERNET) && \ !defined(CONFIG_TR) && !defined(CONFIG_FDDI) #error Cannot compile lcs.c without some net devices switched on. #endif @@ -1634,7 +1634,7 @@ lcs_startlan_auto(struct lcs_card *card) int rc; LCS_DBF_TEXT(2, trace, "strtauto"); -#ifdef CONFIG_NET_ETHERNET +#ifdef CONFIG_ETHERNET card->lan_type = LCS_FRAME_TYPE_ENET; rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); if (rc == 0) @@ -2166,7 +2166,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) goto netdev_out; } switch (card->lan_type) { -#ifdef CONFIG_NET_ETHERNET +#ifdef CONFIG_ETHERNET case LCS_FRAME_TYPE_ENET: card->lan_type_trans = eth_type_trans; dev = alloc_etherdev(0); diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 3251333a23df..b6a6356d09b3 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1994,6 +1994,8 @@ static struct net_device *netiucv_init_netdevice(char *username) netiucv_setup_netdevice); if (!dev) return NULL; + if (dev_alloc_name(dev, dev->name) < 0) + goto out_netdev; privptr = netdev_priv(dev); privptr->fsm = init_fsm("netiucvdev", dev_state_names, diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index b77c65ed1381..4abc79d3963f 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -236,8 +236,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_IN_BUF_COUNT_MAX 128 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) #define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ - ((card)->ssqd.qdioac1 & AC1_SIGA_INPUT_NEEDED ? 
1 : \ - ((card)->qdio.in_buf_pool.buf_count / 2)) + ((card)->qdio.in_buf_pool.buf_count / 2) /* buffers we have to be behind before we get a PCI */ #define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 81534437373a..fff57de78943 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -881,7 +881,6 @@ EXPORT_SYMBOL_GPL(qeth_do_run_thread); void qeth_schedule_recovery(struct qeth_card *card) { QETH_CARD_TEXT(card, 2, "startrec"); - WARN_ON(1); if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) schedule_work(&card->kernel_thread_starter); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e4c1176ee25b..4d5307ddbe55 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2756,11 +2756,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) struct neighbour *n = NULL; struct dst_entry *dst; + rcu_read_lock(); dst = skb_dst(skb); if (dst) n = dst_get_neighbour(dst); if (n) { cast_type = n->type; + rcu_read_unlock(); if ((cast_type == RTN_BROADCAST) || (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) @@ -2768,6 +2770,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) else return RTN_UNSPEC; } + rcu_read_unlock(); + /* try something else */ if (skb->protocol == ETH_P_IPV6) return (skb_network_header(skb)[24] == 0xff) ? @@ -2847,6 +2851,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); + + rcu_read_lock(); dst = skb_dst(skb); if (dst) n = dst_get_neighbour(dst); @@ -2893,6 +2899,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; } } + rcu_read_unlock(); } static inline void qeth_l3_hdr_csum(struct qeth_card *card, diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 0ea2fbfe0e99..d979bb26522f 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -335,10 +335,10 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, QETH_IN_BUF_COUNT_MAX) qeth_realloc_buffer_pool(card, QETH_IN_BUF_COUNT_MAX); - break; } else rc = -EPERM; - default: /* fall through */ + break; + default: rc = -EINVAL; } out: diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 4aa76d6f11df..705e13e470af 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -38,6 +38,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> @@ -1109,6 +1110,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, unique_id++; } + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + error = pci_enable_device(pdev); if (error) goto out; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index e76107b2ade3..865d452542be 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> @@ -3922,6 +3923,10 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h) dev_warn(&h->pdev->dev, "controller appears to be 
disabled\n"); return -ENODEV; } + + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + err = pci_enable_device(h->pdev); if (err) { dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 8889b1babcac..4e041f6d808c 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2802,6 +2802,11 @@ _scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc) if (ioc->is_driver_loading) return; + + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) + return; + fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES; fw_event->ioc = ioc; _scsih_fw_event_add(ioc, fw_event); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 06bc26554a67..f85cfa6c47b5 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1409,6 +1409,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) blk_start_request(req); + scmd_printk(KERN_INFO, cmd, "killing request\n"); + sdev = cmd->device; starget = scsi_target(sdev); shost = sdev->host; @@ -1490,7 +1492,6 @@ static void scsi_request_fn(struct request_queue *q) struct request *req; if (!sdev) { - printk("scsi: killing requests for dead queue\n"); while ((req = blk_peek_request(q)) != NULL) scsi_kill_request(req, q); return; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 72273a0e5666..b3c6d957fbd8 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -319,11 +319,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, return sdev; out_device_destroy: - scsi_device_set_state(sdev, SDEV_DEL); - transport_destroy_device(&sdev->sdev_gendev); - put_device(&sdev->sdev_dev); - scsi_free_queue(sdev->request_queue); - put_device(&sdev->sdev_gendev); + __scsi_remove_device(sdev); out: if (display_failure_msg) printk(ALLOC_FAILURE_MSG, __func__); diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index 24e6cec0ae8d..67e272ab1623 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile @@ -7,3 +7,11 @@ obj-$(CONFIG_HAVE_CLK) += clk/ obj-$(CONFIG_MAPLE) += maple/ obj-$(CONFIG_SUPERHYWAY) += superhyway/ obj-$(CONFIG_GENERIC_GPIO) += pfc.o + +# +# For the moment we only use this framework for ARM-based SH/R-Mobile +# platforms and generic SH. SH-based SH-Mobile platforms are still using +# an older framework that is pending up-porting, at which point this +# special casing can go away. 
+# +obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index dc8d022c07a1..db257a35e71a 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -25,7 +25,6 @@ #include <linux/seq_file.h> #include <linux/err.h> #include <linux/io.h> -#include <linux/debugfs.h> #include <linux/cpufreq.h> #include <linux/clk.h> #include <linux/sh_clk.h> @@ -173,6 +172,26 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, return clk_rate_round_helper(&div_range_round); } +static long clk_rate_mult_range_iter(unsigned int pos, + struct clk_rate_round_data *rounder) +{ + return clk_get_rate(rounder->arg) * pos; +} + +long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min, + unsigned int mult_max, unsigned long rate) +{ + struct clk_rate_round_data mult_range_round = { + .min = mult_min, + .max = mult_max, + .func = clk_rate_mult_range_iter, + .arg = clk_get_parent(clk), + .rate = rate, + }; + + return clk_rate_round_helper(&mult_range_round); +} + int clk_rate_table_find(struct clk *clk, struct cpufreq_frequency_table *freq_table, unsigned long rate) @@ -205,9 +224,6 @@ int clk_reparent(struct clk *child, struct clk *parent) list_add(&child->sibling, &parent->children); child->parent = parent; - /* now do the debugfs renaming to reattach the child - to the proper parent */ - return 0; } @@ -665,89 +681,6 @@ static int __init clk_syscore_init(void) subsys_initcall(clk_syscore_init); #endif -/* - * debugfs support to trace clock tree hierarchy and attributes - */ -static struct dentry *clk_debugfs_root; - -static int clk_debugfs_register_one(struct clk *c) -{ - int err; - struct dentry *d; - struct clk *pa = c->parent; - char s[255]; - char *p = s; - - p += sprintf(p, "%p", c); - d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root); - if (!d) - return -ENOMEM; - c->dentry = d; - - d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount); - if (!d) { - err = -ENOMEM; - goto err_out; - } - d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate); - if (!d) { - err = -ENOMEM; - goto err_out; - } - d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags); - if (!d) { - err = -ENOMEM; - goto err_out; - } - return 0; - -err_out: - debugfs_remove_recursive(c->dentry); - return err; -} - -static int clk_debugfs_register(struct clk *c) -{ - int err; - struct clk *pa = c->parent; - - if (pa && !pa->dentry) { - err = clk_debugfs_register(pa); - if (err) - return err; - } - - if (!c->dentry) { - err = clk_debugfs_register_one(c); - if (err) - return err; - } - return 0; -} - -static int __init clk_debugfs_init(void) -{ - struct clk *c; - struct dentry *d; - int err; - - d = debugfs_create_dir("clock", NULL); - if (!d) - return -ENOMEM; - clk_debugfs_root = d; - - list_for_each_entry(c, &clock_list, node) { - err = clk_debugfs_register(c); - if (err) - goto err_out; - } - return 0; -err_out: - debugfs_remove_recursive(clk_debugfs_root); - return err; -} -late_initcall(clk_debugfs_init); - static int __init clk_late_init(void) { unsigned long flags; diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c new file mode 100644 index 000000000000..afe9282629b9 --- /dev/null +++ b/drivers/sh/pm_runtime.c @@ -0,0 +1,65 @@ +/* + * Runtime PM support code + * + * Copyright (C) 2009-2010 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/pm_clock.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/sh_clk.h> +#include <linux/bitmap.h> +#include <linux/slab.h> + +#ifdef CONFIG_PM_RUNTIME + +static int default_platform_runtime_idle(struct device *dev) +{ + /* suspend synchronously to disable clocks immediately */ + return pm_runtime_suspend(dev); +} + +static struct dev_pm_domain default_pm_domain = { + .ops = { + .runtime_suspend = pm_clk_suspend, + .runtime_resume = pm_clk_resume, + .runtime_idle = default_platform_runtime_idle, + USE_PLATFORM_PM_SLEEP_OPS + }, +}; + +#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) + +#else + +#define DEFAULT_PM_DOMAIN_PTR NULL + +#endif /* CONFIG_PM_RUNTIME */ + +static struct pm_clk_notifier_block platform_bus_notifier = { + .pm_domain = DEFAULT_PM_DOMAIN_PTR, + .con_ids = { NULL, }, +}; + +static int __init sh_pm_runtime_init(void) +{ + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + return 0; +} +core_initcall(sh_pm_runtime_init); + +static int __init sh_pm_runtime_late_init(void) +{ + pm_genpd_poweroff_unused(); + return 0; +} +late_initcall(sh_pm_runtime_late_init); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index a1fd73df5416..8ba4510a9519 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -199,7 +199,7 @@ config SPI_FSL_LIB depends on FSL_SOC config SPI_FSL_SPI - tristate "Freescale SPI controller" + bool "Freescale SPI controller" depends on FSL_SOC select SPI_FSL_LIB help @@ -208,7 +208,7 @@ config SPI_FSL_SPI MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. 
config SPI_FSL_ESPI - tristate "Freescale eSPI controller" + bool "Freescale eSPI controller" depends on FSL_SOC select SPI_FSL_LIB help diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 024b48aed5ca..acc88b4d2869 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -13,6 +13,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 79665e2e6ec5..16d6a839c7fa 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi) /*-------------------------------------------------------------------------*/ -static int __init atmel_spi_probe(struct platform_device *pdev) +static int __devinit atmel_spi_probe(struct platform_device *pdev) { struct resource *regs; int irq; @@ -1003,7 +1003,7 @@ out_free: return ret; } -static int __exit atmel_spi_remove(struct platform_device *pdev) +static int __devexit atmel_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct atmel_spi *as = spi_master_get_devdata(master); @@ -1072,6 +1072,7 @@ static struct platform_driver atmel_spi_driver = { }, .suspend = atmel_spi_suspend, .resume = atmel_spi_resume, + .probe = atmel_spi_probe, .remove = __exit_p(atmel_spi_remove), }; module_platform_driver(atmel_spi_driver); diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index e093d3ec41ba..0094c645ff0d 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi) spi_bitbang_cleanup(spi); } -static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) +static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in) { int value; @@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in) return value; } -static int __init +static int __devinit spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label, u16 *res_flags) { diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c index e763254741c2..182e9c873822 100644 --- a/drivers/spi/spi-nuc900.c +++ b/drivers/spi/spi-nuc900.c @@ -8,6 +8,7 @@ * */ +#include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/workqueue.h> @@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev) goto err_clk; } - mfp_set_groupg(&pdev->dev); + mfp_set_groupg(&pdev->dev, NULL); nuc900_init_spi(hw); err = spi_bitbang_start(&hw->bitbang); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index f103e470cb63..5559b2299198 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -2184,6 +2184,12 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) goto err_clk_prep; } + status = clk_enable(pl022->clk); + if (status) { + dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n"); + goto err_no_clk_en; + } + /* Disable SSP */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); @@ -2237,6 +2243,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) free_irq(adev->irq[0], pl022); err_no_irq: + clk_disable(pl022->clk); + err_no_clk_en: clk_unprepare(pl022->clk); err_clk_prep: clk_put(pl022->clk); diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 
21d8c1c16cd8..5e78c77d5a08 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev, } insns = - kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL); + kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL); if (!insns) { DPRINTK("kmalloc failed\n"); ret = -ENOMEM; @@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s) return ret; } -static void comedi_unmap(struct vm_area_struct *area) + +static void comedi_vm_open(struct vm_area_struct *area) +{ + struct comedi_async *async; + struct comedi_device *dev; + + async = area->vm_private_data; + dev = async->subdevice->device; + + mutex_lock(&dev->mutex); + async->mmap_count++; + mutex_unlock(&dev->mutex); +} + +static void comedi_vm_close(struct vm_area_struct *area) { struct comedi_async *async; struct comedi_device *dev; @@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area) } static struct vm_operations_struct comedi_vm_ops = { - .close = comedi_unmap, + .open = comedi_vm_open, + .close = comedi_vm_close, }; static int comedi_mmap(struct file *file, struct vm_area_struct *vma) { const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; struct comedi_async *async = NULL; unsigned long start = vma->vm_start; unsigned long size; @@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma) int i; int retval; struct comedi_subdevice *s; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + + dev_file_info = comedi_get_device_file_info(minor); + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; mutex_lock(&dev->mutex); if (!dev->attached) { @@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait) { unsigned int mask = 0; const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; struct comedi_subdevice *read_subdev; struct comedi_subdevice *write_subdev; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; mutex_lock(&dev->mutex); if (!dev->attached) { @@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, int n, m, count = 0, retval = 0; DECLARE_WAITQUEUE(wait, current); const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; if (!dev->attached) { DPRINTK("no driver configured on comedi%i\n", dev->minor); @@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, retval = -EAGAIN; break; } + schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } - 
schedule(); if (!s->busy) break; if (s->busy != file) { @@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, int n, m, count = 0, retval = 0; DECLARE_WAITQUEUE(wait, current); const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; if (!dev->attached) { DPRINTK("no driver configured on comedi%i\n", dev->minor); @@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, retval = -EAGAIN; break; } + schedule(); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } - schedule(); if (!s->busy) { retval = 0; break; @@ -1885,11 +1924,17 @@ ok: static int comedi_close(struct inode *inode, struct file *file) { const unsigned minor = iminor(inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; struct comedi_subdevice *s = NULL; int i; + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); + + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; mutex_lock(&dev->mutex); @@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file) static int comedi_fasync(int fd, struct file *file, int on) { const unsigned minor = iminor(file->f_dentry->d_inode); - struct comedi_device_file_info *dev_file_info = - comedi_get_device_file_info(minor); + struct comedi_device_file_info *dev_file_info; + struct comedi_device *dev; + dev_file_info = comedi_get_device_file_info(minor); - struct comedi_device *dev = dev_file_info->device; + if (dev_file_info == NULL) + return -ENODEV; + dev = dev_file_info->device; + if (dev == NULL) + return -ENODEV; return fasync_helper(fd, file, on, &dev->async_queue); } diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c index a8fea9a91733..6144afb8cbaa 100644 --- a/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/drivers/staging/comedi/drivers/usbduxsigma.c @@ -1,4 +1,4 @@ -#define DRIVER_VERSION "v0.5" +#define DRIVER_VERSION "v0.6" #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com" #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com" /* @@ -25,7 +25,7 @@ Driver: usbduxsigma Description: University of Stirling USB DAQ & INCITE Technology Limited Devices: [ITL] USB-DUX (usbduxsigma.o) Author: Bernd Porr <BerndPorr@f2s.com> -Updated: 21 Jul 2011 +Updated: 8 Nov 2011 Status: testing */ /* @@ -44,6 +44,7 @@ Status: testing * 0.3: proper vendor ID and driver name * 0.4: fixed D/A voltage range * 0.5: various bug fixes, health check at startup + * 0.6: corrected wrong input range */ /* generates loads of debug info */ @@ -175,7 +176,7 @@ Status: testing /* comedi constants */ static const struct comedi_lrange range_usbdux_ai_range = { 1, { BIP_RANGE - (2.65) + (2.65/2.0) } }; diff --git a/drivers/staging/et131x/Kconfig b/drivers/staging/et131x/Kconfig index 9e1864c6dfd0..8190f2aaf53b 100644 --- a/drivers/staging/et131x/Kconfig +++ b/drivers/staging/et131x/Kconfig @@ -1,6 +1,7 @@ 
config ET131X tristate "Agere ET-1310 Gigabit Ethernet support" - depends on PCI + depends on PCI && NET && NETDEVICES + select PHYLIB default n ---help--- This driver supports Agere ET-1310 ethernet adapters. diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c index f5f44a02456f..0c1c6ca8c379 100644 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c @@ -4469,6 +4469,12 @@ static int et131x_resume(struct device *dev) return 0; } +static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); +#define ET131X_PM_OPS (&et131x_pm_ops) +#else +#define ET131X_PM_OPS NULL +#endif + /* ISR functions */ /** @@ -5470,12 +5476,6 @@ err_out: return result; } -static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); -#define ET131X_PM_OPS (&et131x_pm_ops) -#else -#define ET131X_PM_OPS NULL -#endif - static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c index 326e967d54ef..aec9311b108c 100644 --- a/drivers/staging/iio/industrialio-core.c +++ b/drivers/staging/iio/industrialio-core.c @@ -242,19 +242,26 @@ static const struct file_operations iio_event_chrdev_fileops = { static int iio_event_getfd(struct iio_dev *indio_dev) { - if (indio_dev->event_interface == NULL) + struct iio_event_interface *ev_int = indio_dev->event_interface; + int fd; + + if (ev_int == NULL) return -ENODEV; - mutex_lock(&indio_dev->event_interface->event_list_lock); - if (test_and_set_bit(IIO_BUSY_BIT_POS, - &indio_dev->event_interface->flags)) { - mutex_unlock(&indio_dev->event_interface->event_list_lock); + mutex_lock(&ev_int->event_list_lock); + if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { + mutex_unlock(&ev_int->event_list_lock); return -EBUSY; } - mutex_unlock(&indio_dev->event_interface->event_list_lock); - return anon_inode_getfd("iio:event", - &iio_event_chrdev_fileops, - indio_dev->event_interface, O_RDONLY); + mutex_unlock(&ev_int->event_list_lock); + fd = anon_inode_getfd("iio:event", + &iio_event_chrdev_fileops, ev_int, O_RDONLY); + if (fd < 0) { + mutex_lock(&ev_int->event_list_lock); + clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); + mutex_unlock(&ev_int->event_list_lock); + } + return fd; } static int __init iio_init(void) diff --git a/drivers/staging/media/as102/as102_drv.c b/drivers/staging/media/as102/as102_drv.c index d335c7d6fa0f..828526d4c289 100644 --- a/drivers/staging/media/as102/as102_drv.c +++ b/drivers/staging/media/as102/as102_drv.c @@ -32,8 +32,8 @@ #include "as102_fw.h" #include "dvbdev.h" -int debug; -module_param_named(debug, debug, int, 0644); +int as102_debug; +module_param_named(debug, as102_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default: off)"); int dual_tuner; diff --git a/drivers/staging/media/as102/as102_drv.h b/drivers/staging/media/as102/as102_drv.h index bcda635b5a99..fd33f5a12dcc 100644 --- a/drivers/staging/media/as102/as102_drv.h +++ b/drivers/staging/media/as102/as102_drv.h @@ -37,7 +37,8 @@ extern struct spi_driver as102_spi_driver; #define DRIVER_FULL_NAME "Abilis Systems as10x usb driver" #define DRIVER_NAME "as10x_usb" -extern int debug; +extern int as102_debug; +#define debug as102_debug #define dprintk(debug, args...) 
\ do { if (debug) { \ diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index b445cd63f901..2542c3743904 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -275,7 +275,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset)); + hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) + fs->page_offset)); hw_buffer.s.size = fs->size; CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c index 480b0ed2e4de..115635f95024 100644 --- a/drivers/staging/rts_pstor/rtsx.c +++ b/drivers/staging/rts_pstor/rtsx.c @@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci, th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start the device-scanning thread\n"); + complete(&dev->scanning_done); quiesce_and_remove_host(dev); err = PTR_ERR(th); goto errout; diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig index 5cde96b2e6e1..5c2a15b42dfe 100644 --- a/drivers/staging/slicoss/Kconfig +++ b/drivers/staging/slicoss/Kconfig @@ -1,6 +1,6 @@ config SLICOSS tristate "Alacritech Gigabit IS-NIC support" - depends on PCI && X86 + depends on PCI && X86 && NET default n help This driver supports Alacritech's IS-NIC gigabit ethernet cards. diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index 09c44abb89e8..3872b8cccdcf 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, { struct usbip_device *ud = &vdev->ud; struct urb *urb; + unsigned long flags; spin_lock(&vdev->priv_lock); urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); @@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, usbip_dbg_vhci_rx("now giveback urb %p\n", urb); - spin_lock(&the_controller->lock); + spin_lock_irqsave(&the_controller->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); - spin_unlock(&the_controller->lock); + spin_unlock_irqrestore(&the_controller->lock, flags); usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); @@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, { struct vhci_unlink *unlink; struct urb *urb; + unsigned long flags; usbip_dump_header(pdu); @@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, urb->status = pdu->u.ret_unlink.status; pr_info("urb->status %d\n", urb->status); - spin_lock(&the_controller->lock); + spin_lock_irqsave(&the_controller->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); - spin_unlock(&the_controller->lock); + spin_unlock_irqrestore(&the_controller->lock, flags); usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 0fd96c10271d..8599545cdf9e 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -614,13 +614,12 @@ int iscsit_add_reject( hdr = (struct iscsi_reject *) cmd->pdu; hdr->reason = reason; - cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); + cmd->buf_ptr = kmemdup(buf, 
ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); iscsit_release_cmd(cmd); return -1; } - memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN); spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_list, &conn->conn_cmd_list); @@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd( hdr = (struct iscsi_reject *) cmd->pdu; hdr->reason = reason; - cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); + cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); if (!cmd->buf_ptr) { pr_err("Unable to allocate memory for cmd->buf_ptr\n"); iscsit_release_cmd(cmd); return -1; } - memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN); if (add_to_conn) { spin_lock_bh(&conn->cmd_lock); @@ -1017,11 +1015,6 @@ done: " non-existent or non-exported iSCSI LUN:" " 0x%016Lx\n", get_unaligned_le64(&hdr->lun)); } - if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES) - return iscsit_add_reject_from_cmd( - ISCSI_REASON_BOOKMARK_NO_RESOURCES, - 1, 1, buf, cmd); - send_check_condition = 1; goto attach_cmd; } @@ -1044,6 +1037,8 @@ done: */ send_check_condition = 1; } else { + cmd->data_length = cmd->se_cmd.data_length; + if (iscsit_decide_list_to_build(cmd, payload_length) < 0) return iscsit_add_reject_from_cmd( ISCSI_REASON_BOOKMARK_NO_RESOURCES, @@ -1123,7 +1118,7 @@ attach_cmd: * the backend memory allocation. */ ret = transport_generic_new_cmd(&cmd->se_cmd); - if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) { + if (ret < 0) { immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; dump_immediate_data = 1; goto after_immediate_data; @@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) spin_lock_irqsave(&se_cmd->t_state_lock, flags); if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) || - (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED)) + (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)) dump_unsolicited_data = 1; spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -2513,10 +2508,10 @@ static int iscsit_send_data_in( if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } } hton24(hdr->dlength, datain.length); @@ -3018,10 +3013,10 @@ static int iscsit_send_status( hdr->flags |= ISCSI_FLAG_CMD_FINAL; if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; - hdr->residual_count = cpu_to_be32(cmd->residual_count); + hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); } hdr->response = cmd->iscsi_response; hdr->cmd_status = cmd->se_cmd.scsi_status; @@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp( hdr = (struct iscsi_tm_rsp *) cmd->pdu; memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; + hdr->flags = ISCSI_FLAG_CMD_FINAL; hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); hdr->itt = cpu_to_be32(cmd->init_task_tag); cmd->stat_sn = conn->stat_sn++; diff --git a/drivers/target/iscsi/iscsi_target_auth.c 
b/drivers/target/iscsi/iscsi_target_auth.c index beb39469e7f1..1cd6ce373b83 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -30,9 +30,11 @@ static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) { - int j = DIV_ROUND_UP(len, 2); + int j = DIV_ROUND_UP(len, 2), rc; - hex2bin(dst, src, j); + rc = hex2bin(dst, src, j); + if (rc < 0) + pr_debug("CHAP string contains non hex digit symbols\n"); dst[j] = '\0'; return j; diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 3723d90d5ae5..f1a02dad05a0 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -398,7 +398,6 @@ struct iscsi_cmd { u32 pdu_send_order; /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */ u32 pdu_start; - u32 residual_count; /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */ u32 seq_send_order; /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */ @@ -535,7 +534,6 @@ struct iscsi_conn { atomic_t connection_exit; atomic_t connection_recovery; atomic_t connection_reinstatement; - atomic_t connection_wait; atomic_t connection_wait_rcfr; atomic_t sleep_on_conn_wait_comp; atomic_t transport_failed; @@ -643,7 +641,6 @@ struct iscsi_session { atomic_t session_reinstatement; atomic_t session_stop_active; atomic_t sleep_on_sess_wait_comp; - atomic_t transport_wait_cmds; /* connection list */ struct list_head sess_conn_list; struct list_head cr_active_list; diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index c4c68da3e500..101b1beb3bca 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo) * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well. 
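/*
 * The chap_string_to_hex() hunk above starts checking hex2bin()'s return
 * value, which lets the caller notice non-hex input instead of quietly
 * accepting a partially converted key.  A minimal sketch of the idiom --
 * the function name and the "reject outright" policy are illustrative
 * only; the patch itself merely logs the problem:
 */
#include <linux/kernel.h>   /* hex2bin(), DIV_ROUND_UP() */
#include <linux/errno.h>
#include <linux/types.h>

static int decode_hex_key(u8 *dst, const char *src, size_t src_len)
{
        size_t binary_len = DIV_ROUND_UP(src_len, 2);
        int rc;

        rc = hex2bin(dst, src, binary_len); /* 0 on success, < 0 on a bad digit */
        if (rc < 0)
                return -EINVAL;             /* caller decides how to handle it */

        return binary_len;
}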
*/ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) { - if (se_cmd->se_cmd_flags & - SCF_SCSI_RESERVATION_CONFLICT) { + if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) { cmd->i_state = ISTATE_SEND_STATUS; spin_unlock_bh(&cmd->istate_lock); iscsit_add_cmd_to_response_queue(cmd, cmd->conn, diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index daad362a93ce..d734bdec24f9 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1( iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); pr_err("Could not allocate memory for session\n"); - return -1; + return -ENOMEM; } iscsi_login_set_conn_values(sess, conn, pdu->cid); @@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1( pr_err("idr_pre_get() for sess_idr failed\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - return -1; + kfree(sess); + return -ENOMEM; } spin_lock(&sess_idr_lock); idr_get_new(&sess_idr, NULL, &sess->session_index); @@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1( ISCSI_LOGIN_STATUS_NO_RESOURCES); pr_err("Unable to allocate memory for" " struct iscsi_sess_ops.\n"); - return -1; + kfree(sess); + return -ENOMEM; } sess->se_sess = transport_init_session(); - if (!sess->se_sess) { + if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - return -1; + kfree(sess); + return -ENOMEM; } return 0; diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 426cd4bf6a9a..98936cb7c294 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation( return NULL; } - login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); + login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL); if (!login->req) { pr_err("Unable to allocate memory for Login Request.\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); goto out; } - memcpy(login->req, login_pdu, ISCSI_HDR_LEN); login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL); if (!login->req_buf) { diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 3df1c9b8ae6b..81d5832fbbd5 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd( scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr, &tl_cmd->tl_sense_buf[0]); - /* - * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi - */ if (scsi_bidi_cmnd(sc)) - se_cmd->t_tasks_bidi = 1; + se_cmd->se_cmd_flags |= SCF_BIDI; + /* * Locate the struct se_lun pointer and attach it to struct se_cmd */ @@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) * Allocate the necessary tasks to complete the received CDB+data */ ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); - if (ret == -ENOMEM) { - /* Out of Resources */ - return PYX_TRANSPORT_LU_COMM_FAILURE; - } else if (ret == -EINVAL) { - /* - * Handle case for SAM_STAT_RESERVATION_CONFLICT - */ - if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) - return PYX_TRANSPORT_RESERVATION_CONFLICT; - /* - * Otherwise, return SAM_STAT_CHECK_CONDITION and return - * sense data. 
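/*
 * The iscsit_add_reject(), iscsit_add_reject_from_cmd() and
 * iscsi_target_init_negotiation() hunks above all collapse a
 * kzalloc() + memcpy() pair into a single kmemdup() call.  A small
 * before/after sketch of the idiom (buffer and function names here are
 * illustrative, not taken from the driver):
 */
#include <linux/slab.h>     /* GFP_KERNEL, kfree() */
#include <linux/string.h>   /* kmemdup() */

static void *copy_pdu_header(const void *pdu, size_t len)
{
        /*
         * Old style:
         *      buf = kzalloc(len, GFP_KERNEL);
         *      if (!buf)
         *              return NULL;
         *      memcpy(buf, pdu, len);
         */
        return kmemdup(pdu, len, GFP_KERNEL); /* allocate and copy in one call */
}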
- */ - return PYX_TRANSPORT_USE_SENSE_REASON; - } - + if (ret != 0) + return ret; /* * For BIDI commands, pass in the extra READ buffer * to transport_generic_map_mem_to_cmd() below.. */ - if (se_cmd->t_tasks_bidi) { + if (se_cmd->se_cmd_flags & SCF_BIDI) { struct scsi_data_buffer *sdb = scsi_in(sc); sgl_bidi = sdb->table.sgl; @@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) } /* Tell the core about our preallocated memory */ - ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), + return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); - if (ret < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; - - return 0; } /* @@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba( { struct tcm_loop_hba *tl_hba = container_of(wwn, struct tcm_loop_hba, tl_hba_wwn); - int host_no = tl_hba->sh->host_no; + + pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" + " SAS Address: %s at Linux/SCSI Host ID: %d\n", + tl_hba->tl_wwn_address, tl_hba->sh->host_no); /* * Call device_unregister() on the original tl_hba->dev. * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will * release *tl_hba; */ device_unregister(&tl_hba->dev); - - pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target" - " SAS Address: %s at Linux/SCSI Host ID: %d\n", - config_item_name(&wwn->wwn_group.cg_item), host_no); } /* Start items for tcm_loop_cit */ diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 88f2ad43ec8b..1dcbef499d6a 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task) int alua_access_state, primary = 0, rc; u16 tg_pt_id, rtpi; - if (!l_port) - return PYX_TRANSPORT_LU_COMM_FAILURE; - + if (!l_port) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } buf = transport_kmap_first_data_page(cmd); /* @@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; if (!l_tg_pt_gp_mem) { pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); - rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + rc = -EINVAL; goto out; } spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) if (!l_tg_pt_gp) { spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); - rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + rc = -EINVAL; goto out; } rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); @@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) if (!rc) { pr_debug("Unable to process SET_TARGET_PORT_GROUPS" " while TPGS_EXPLICT_ALUA is disabled\n"); - rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + rc = -EINVAL; goto out; } @@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) * REQUEST, and the additional sense code set to INVALID * FIELD IN PARAMETER LIST. 
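/*
 * The pattern repeated in the hunks above, and throughout the rest of the
 * target changes below: a backend or emulation routine no longer returns a
 * PYX_TRANSPORT_* magic value.  It records the SCSI-level failure in
 * se_cmd->scsi_sense_reason as a TCM_* value and returns an ordinary
 * negative errno, with 0 meaning the request was handled or queued.  A
 * condensed sketch of the new shape of such a routine; issue_backend_io()
 * is a stand-in, everything else is named as in the patch:
 */
static int example_do_task(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        int ret;

        ret = issue_backend_io(task);   /* stand-in for the real submission */
        if (ret < 0) {
                /* old: return PYX_TRANSPORT_LU_COMM_FAILURE; */
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return ret;
        }

        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
        return 0;                       /* old: PYX_TRANSPORT_SENT_TO_TRANSPORT */
}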
*/ - rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + rc = -EINVAL; goto out; } rc = -1; @@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) * throw an exception with ASCQ: INVALID_PARAMETER_LIST */ if (rc != 0) { - rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + rc = -EINVAL; goto out; } } else { @@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task) * INVALID_PARAMETER_LIST */ if (rc != 0) { - rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + rc = -EINVAL; goto out; } } @@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * struct t10_alua_lu_gp. */ spin_lock(&lu_gps_lock); - atomic_set(&lu_gp->lu_gp_shutdown, 1); list_del(&lu_gp->lu_gp_node); alua_lu_gps_count--; spin_unlock(&lu_gps_lock); @@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( tg_pt_gp_mem->tg_pt = port; port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem; - atomic_set(&port->sep_tg_pt_gp_active, 1); return tg_pt_gp_mem; } diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 683ba02b8247..831468b3163d 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) if (cmd->data_length < 60) return 0; - buf[2] = 0x3c; + buf[3] = 0x3c; /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; @@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task) if (cmd->data_length < 4) { pr_err("SCSI Inquiry payload length: %u" " too small for EVPD=1\n", cmd->data_length); + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } @@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task) } pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; ret = -EINVAL; out_unmap: @@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task) default: pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", cdb[2] & 0x3f, cdb[3]); - return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; + cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; + return -EINVAL; } offset += length; @@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task) if (cdb[1] & 0x01) { pr_err("REQUEST_SENSE description emulation not" " supported\n"); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -ENOSYS; } buf = transport_kmap_first_data_page(cmd); @@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task) if (!dev->transport->do_discard) { pr_err("UNMAP emulation not supported for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; } /* First UNMAP block descriptor starts at 8 byte offset */ @@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task) if (!dev->transport->do_discard) { pr_err("WRITE_SAME emulation not supported" " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; } if (cmd->t_task_cdb[0] == WRITE_SAME) @@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task) int target_emulate_synchronize_cache(struct se_task *task) { struct se_device *dev = task->task_se_cmd->se_dev; + struct se_cmd *cmd = 
task->task_se_cmd; if (!dev->transport->do_sync_cache) { pr_err("SYNCHRONIZE_CACHE emulation not supported" " for: %s\n", dev->transport->name); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + return -ENOSYS; } dev->transport->do_sync_cache(task); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index e0c1e8a8dd4e..93d4f6a1b798 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup; static struct config_group alua_group; static struct config_group alua_lu_gps_group; -static DEFINE_SPINLOCK(se_device_lock); -static LIST_HEAD(se_dev_list); - static inline struct se_hba * item_to_hba(struct config_item *item) { @@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev( " struct se_subsystem_dev\n"); goto unlock; } - INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); @@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev( " from allocate_virtdevice()\n"); goto out; } - spin_lock(&se_device_lock); - list_add_tail(&se_dev->se_dev_node, &se_dev_list); - spin_unlock(&se_device_lock); config_group_init_type_name(&se_dev->se_dev_group, name, &target_core_dev_cit); @@ -2874,10 +2867,6 @@ static void target_core_drop_subdev( mutex_lock(&hba->hba_access_mutex); t = hba->transport; - spin_lock(&se_device_lock); - list_del(&se_dev->se_dev_node); - spin_unlock(&se_device_lock); - dev_stat_grp = &se_dev->dev_stat_grps.stat_group; for (i = 0; dev_stat_grp->default_groups[i]; i++) { df_item = &dev_stat_grp->default_groups[i]->cg_item; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ba5edec2c5f8..9b8639425472 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } /* @@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = se_cmd->se_dev; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -708,7 +705,7 @@ done: se_task->task_scsi_status = GOOD; transport_complete_task(se_task, 1); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* se_release_device_for_hba(): @@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) return -EINVAL; } - pr_err("dpo_emulated not supported\n"); - return -EINVAL; + if (flag) { + pr_err("dpo_emulated not supported\n"); + return -EINVAL; + } + + return 0; } int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) @@ -968,7 +969,7 @@ int 
se_dev_set_emulate_fua_write(struct se_device *dev, int flag) return -EINVAL; } - if (dev->transport->fua_write_emulated == 0) { + if (flag && dev->transport->fua_write_emulated == 0) { pr_err("fua_write_emulated not supported\n"); return -EINVAL; } @@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) return -EINVAL; } - pr_err("ua read emulated not supported\n"); - return -EINVAL; + if (flag) { + pr_err("ua read emulated not supported\n"); + return -EINVAL; + } + + return 0; } int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) @@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->write_cache_emulated == 0) { + if (flag && dev->transport->write_cache_emulated == 0) { pr_err("write_cache_emulated not supported\n"); return -EINVAL; } @@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } @@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } @@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void) ret = -ENOMEM; goto out; } - INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 67cd6fe05bfa..b4864fba4ef0 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task) return -ENOMEM; } - for (i = 0; i < task->task_sg_nents; i++) { - iov[i].iov_len = sg[i].length; - iov[i].iov_base = sg_virt(&sg[i]); + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + iov[i].iov_len = sg->length; + iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); @@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task) return -ENOMEM; } - for (i = 0; i < task->task_sg_nents; i++) { - iov[i].iov_len = sg[i].length; - iov[i].iov_base = sg_virt(&sg[i]); + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + iov[i].iov_len = sg->length; + iov[i].iov_base = sg_virt(sg); } old_fs = get_fs(); @@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task) if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - cmd->t_tasks_fua) { + (cmd->se_cmd_flags & SCF_FUA)) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator @@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task) } - if (ret < 0) + if (ret < 0) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return ret; + } if (ret) { task->task_scsi_status = GOOD; transport_complete_task(task, 1); } - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + 
return 0; } /* fd_free_task(): (Part of se_subsystem_api_t template) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7698efe29262..4aa992204438 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task) */ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_tasks_fua)) + (cmd->se_cmd_flags & SCF_FUA))) rw = WRITE_FUA; else rw = WRITE; @@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task) else { pr_err("Unsupported SCSI -> BLOCK LBA conversion:" " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOSYS; } bio = iblock_get_bio(task, block_lba, sg_num); - if (!bio) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + if (!bio) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; + } bio_list_init(&list); bio_list_add(&list, bio); @@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task) submit_bio(rw, bio); blk_finish_plug(&plug); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; fail: while ((bio = bio_list_pop(&list))) bio_put(bio); - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } static u32 iblock_get_device_rev(struct se_device *dev) diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 5a4ebfc3a54f..95dee7074aeb 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret) pr_err("Received legacy SPC-2 RESERVE/RELEASE" " while active SPC-3 registrations exist," " returning RESERVATION_CONFLICT\n"); - *ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; return true; } @@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task) (cmd->t_task_cdb[1] & 0x02)) { pr_err("LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); - ret = PYX_TRANSPORT_ILLEGAL_REQUEST; + cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; + ret = -EINVAL; goto out; } /* @@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task) " from %s \n", cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = -EINVAL; goto out_unlock; } @@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port( tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); if (!tidh_new) { pr_err("Unable to allocate tidh_new\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } INIT_LIST_HEAD(&tidh_new->dest_list); tidh_new->dest_tpg = tpg; @@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port( sa_res_key, all_tg_pt, aptpl); if (!local_pr_reg) { kfree(tidh_new); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } tidh_new->dest_pr_reg = local_pr_reg; /* @@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port( pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" " does not equal CDB data_length: 
%u\n", tpdl, cmd->data_length); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port( " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } /* @@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port( atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); core_scsi3_tpg_undepend_item(tmp_tpg); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } @@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port( if (!dest_tpg) { pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" " dest_tpg\n"); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } #if 0 @@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port( " %u for Transport ID: %s\n", tid_len, ptr); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port( core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port( smp_mb__after_atomic_dec(); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } #if 0 @@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port( core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -ENOMEM; goto out; } INIT_LIST_HEAD(&tidh_new->dest_list); @@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port( core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); kfree(tidh_new); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } tidh_new->dest_pr_reg = dest_pr_reg; @@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } se_tpg = se_sess->se_tpg; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; @@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register( if (res_key) { pr_warn("SPC-3 PR: Reservation Key non-zero" " for SA REGISTER, returning CONFLICT\n"); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * Do nothing but return GOOD status. 
*/ if (!sa_res_key) - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; if (!spec_i_pt) { /* @@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register( if (ret != 0) { pr_err("Unable to allocate" " struct t10_pr_registration\n"); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } } else { /* @@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register( " 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } } if (spec_i_pt) { pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" " set while sa_res_key=0\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* * An existing ALL_TG_PT=1 registration being released @@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register( " registration exists, but ALL_TG_PT=1 bit not" " present in received PROUT\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } /* * Allocate APTPL metadata buffer used for UNREGISTER ops @@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register( pr_err("Unable to allocate" " pr_aptpl_buf\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } } /* @@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register( if (pr_holder < 0) { kfree(pr_aptpl_buf); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } spin_lock(&pr_tmpl->registration_lock); @@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } se_tpg = se_sess->se_tpg; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; @@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve( if (!pr_reg) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RESERVE\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve( " does not match existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve( if (scope != PR_SCOPE_LU_SCOPE) { pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* * See if we have an existing PR reservation holder pointer at @@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve( spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2503,7 +2534,8 @@ 
static int core_scsi3_pro_reserve( spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.9: Reserving: @@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve( */ spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* * Otherwise, our *pr_reg becomes the PR reservation holder for said @@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve( default: pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" " 0x%02x\n", type); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } return ret; @@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * Locate the existing *pr_reg via struct se_node_acl pointers @@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release( if (!pr_reg) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RELEASE\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * From spc4r17 Section 5.7.11.2 Releasing: @@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release( */ spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) @@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release( */ spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* * From spc4r17 Section 5.7.11.2 Releasing: @@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release( " 0x%016Lx\n", res_key, pr_reg->pr_res_key); spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * From spc4r17 Section 5.7.11.2 Releasing and above: @@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release( spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * In response to a persistent reservation release request from the @@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear( if (!pr_reg_n) { pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for CLEAR\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * From spc4r17 section 5.7.11.6, Clearing: @@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear( " existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * a) Release the persistent reservation, if any; @@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt( int all_reg = 0, calling_it_nexus = 0, released_regs = 0; int prh_type = 0, prh_scope = 0, ret; 
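/*
 * For persistent-reservation conflicts specifically, the conversions above
 * leave no sense payload to build: the service action records
 * TCM_RESERVATION_CONFLICT and returns -EINVAL, and the reworked failure
 * path near the end of this patch maps that straight to a RESERVATION
 * CONFLICT SCSI status.  A condensed sketch of the key check (the helper
 * name is illustrative):
 */
static int pr_check_reservation_key(struct se_cmd *cmd,
                                    struct t10_pr_registration *pr_reg,
                                    u64 res_key)
{
        if (res_key != pr_reg->pr_res_key) {
                /* old: return PYX_TRANSPORT_RESERVATION_CONFLICT; */
                cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return -EINVAL;
        }

        return 0;
}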
- if (!se_sess) - return PYX_TRANSPORT_LU_COMM_FAILURE; + if (!se_sess) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, @@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt( pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for PREEMPT%s\n", (abort) ? "_AND_ABORT" : ""); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } if (pr_reg_n->pr_res_key != res_key) { core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } if (scope != PR_SCOPE_LU_SCOPE) { pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } INIT_LIST_HEAD(&preempt_and_abort_list); @@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt( if (!all_reg && !sa_res_key) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* * From spc4r17, section 5.7.11.4.4 Removing Registrations: @@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt( if (!released_regs) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * For an existing all registrants type reservation @@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt( default: pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" " Type: 0x%02x\n", (abort) ? 
"_AND_ABORT" : "", type); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } return ret; @@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move( if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } memset(dest_iport, 0, 64); memset(i_buf, 0, PR_REG_ISID_ID_LEN); @@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move( if (!pr_reg) { pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" " *pr_reg for REGISTER_AND_MOVE\n"); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } /* * The provided reservation key much match the existing reservation key @@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move( " res_key: 0x%016Lx does not match existing SA REGISTER" " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } /* * The service active reservation key needs to be non zero @@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move( pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" " sa_res_key\n"); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } /* @@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move( " does not equal CDB data_length: %u\n", tid_len, cmd->data_length); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } spin_lock(&dev->se_port_lock); @@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move( atomic_dec(&dest_se_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } spin_lock(&dev->se_port_lock); @@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move( " fabric ops from Relative Target Port Identifier:" " %hu\n", rtpi); core_scsi3_put_pr_reg(pr_reg); - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move( " from fabric: %s\n", proto_ident, dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), dest_tf_ops->get_fabric_name()); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" " containg a valid tpg_parse_pr_out_transport_id" " function pointer\n"); - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, @@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move( if (!initiator_str) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport ID\n"); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + 
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move( pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" " matches: %s on received I_T Nexus\n", initiator_str, pr_reg_nacl->initiatorname); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { @@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move( " matches: %s %s on received I_T Nexus\n", initiator_str, iport_ptr, pr_reg_nacl->initiatorname, pr_reg->pr_reg_isid); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } after_iport_check: @@ -3517,7 +3576,8 @@ after_iport_check: pr_err("Unable to locate %s dest_node_acl for" " TransportID%s\n", dest_tf_ops->get_fabric_name(), initiator_str); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } ret = core_scsi3_nodeacl_depend_item(dest_node_acl); @@ -3527,7 +3587,8 @@ after_iport_check: atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); dest_node_acl = NULL; - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } #if 0 @@ -3543,7 +3604,8 @@ after_iport_check: if (!dest_se_deve) { pr_err("Unable to locate %s dest_se_deve from RTPI:" " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -3553,7 +3615,8 @@ after_iport_check: atomic_dec(&dest_se_deve->pr_ref_count); smp_mb__after_atomic_dec(); dest_se_deve = NULL; - ret = PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + ret = -EINVAL; goto out; } #if 0 @@ -3572,7 +3635,8 @@ after_iport_check: pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" " currently held\n"); spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + ret = -EINVAL; goto out; } /* @@ -3585,7 +3649,8 @@ after_iport_check: pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" " Nexus is not reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = -EINVAL; goto out; } /* @@ -3603,7 +3668,8 @@ after_iport_check: " reservation for type: %s\n", core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = -EINVAL; goto out; } pr_res_nacl = pr_res_holder->pr_reg_nacl; @@ -3640,7 +3706,8 @@ after_iport_check: sa_res_key, 0, aptpl, 2, 1); if (ret != 0) { spin_unlock(&dev->dev_reservation_lock); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, @@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); - ret = PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + ret = EINVAL; goto out; } @@ -3779,13 
+3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task) * FIXME: A NULL struct se_session pointer means an this is not coming from * a $FABRIC_MOD's nexus, but from internal passthrough ops. */ - if (!cmd->se_sess) - return PYX_TRANSPORT_LU_COMM_FAILURE; + if (!cmd->se_sess) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } if (cmd->data_length < 24) { pr_warn("SPC-PR: Received PR OUT parameter list" " length too small: %u\n", cmd->data_length); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) * SPEC_I_PT=1 is only valid for Service action: REGISTER */ if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) { - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } @@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) (cmd->data_length != 24)) { pr_warn("SPC-PR: Received PR OUT illegal parameter" " list length: %u\n", cmd->data_length); - ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; goto out; } /* @@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task) default: pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); - ret = PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + ret = -EINVAL; break; } @@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) if (cmd->data_length < 8) { pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" " too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) if (cmd->data_length < 8) { pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" " too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) if (cmd->data_length < 6) { pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" " %u too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) if (cmd->data_length < 8) { pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" " too small\n", cmd->data_length); - return PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; } buf = transport_kmap_first_data_page(cmd); @@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task) pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); - return PYX_TRANSPORT_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EINVAL; } switch (cmd->t_task_cdb[1] & 0x1f) { @@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task) default: pr_err("Unknown PERSISTENT_RESERVE_IN service" " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); - ret = 
PYX_TRANSPORT_INVALID_CDB_FIELD; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + ret = -EINVAL; break; } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index ed32e1efe429..8b15e56b0384 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num) static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, struct bio **hbio) { + struct se_cmd *cmd = task->task_se_cmd; struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; u32 task_sg_num = task->task_sg_nents; struct bio *bio = NULL, *tbio = NULL; @@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, u32 data_len = task->task_size, i, len, bytes, off; int nr_pages = (task->task_size + task_sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT; - int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + int nr_vecs = 0, rc; int rw = (task->task_data_direction == DMA_TO_DEVICE); *hbio = NULL; @@ -1058,11 +1059,13 @@ fail: bio->bi_next = NULL; bio_endio(bio, 0); /* XXX: should be error */ } - return ret; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } static int pscsi_do_task(struct se_task *task) { + struct se_cmd *cmd = task->task_se_cmd; struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; struct pscsi_plugin_task *pt = PSCSI_TASK(task); struct request *req; @@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task) if (!req || IS_ERR(req)) { pr_err("PSCSI: blk_get_request() failed: %ld\n", req ? IS_ERR(req) : -ENOMEM); - return PYX_TRANSPORT_LU_COMM_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENODEV; } } else { BUG_ON(!task->task_size); @@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task) * Setup the main struct request for the task->task_sg[] payload */ ret = pscsi_map_sg(task, task->task_sg, &hbio); - if (ret < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; + if (ret < 0) { + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return ret; + } req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); @@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task) (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), pscsi_req_done); - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; fail: while (hbio) { @@ -1124,7 +1132,8 @@ fail: bio->bi_next = NULL; bio_endio(bio, 0); /* XXX: should be error */ } - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -ENOMEM; } /* pscsi_get_sense_buffer(): @@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status( " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], pt->pscsi_result); task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - task->task_se_cmd->transport_error_status = - PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + task->task_se_cmd->scsi_sense_reason = + TCM_UNSUPPORTED_SCSI_OPCODE; transport_complete_task(task, 0); break; } diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 5158d3846f19..02e51faa2f4e 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return NULL; } -/* rd_MEMCPY_read(): - * - * - */ -static int rd_MEMCPY_read(struct rd_request 
*req) +static int rd_MEMCPY(struct rd_request *req, u32 read_rd) { struct se_task *task = &req->rd_task; struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; struct rd_dev_sg_table *table; - struct scatterlist *sg_d, *sg_s; - void *dst, *src; - u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; - u32 length, page_end = 0, table_sg_end; + struct scatterlist *rd_sg; + struct sg_mapping_iter m; u32 rd_offset = req->rd_offset; + u32 src_len; table = rd_get_sg_table(dev, req->rd_page); if (!table) return -EINVAL; - table_sg_end = (table->page_end_offset - req->rd_page); - sg_d = task->task_sg; - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; + rd_sg = &table->sg_table[req->rd_page - table->page_start_offset]; - pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" - " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, - req->rd_page, req->rd_offset); - - src_offset = rd_offset; + pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", + dev->rd_dev_id, read_rd ? "Read" : "Write", + task->task_lba, req->rd_size, req->rd_page, + rd_offset); + src_len = PAGE_SIZE - rd_offset; + sg_miter_start(&m, task->task_sg, task->task_sg_nents, + read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG); while (req->rd_size) { - if ((sg_d[i].length - dst_offset) < - (sg_s[j].length - src_offset)) { - length = (sg_d[i].length - dst_offset); - - pr_debug("Step 1 - sg_d[%d]: %p length: %d" - " offset: %u sg_s[%d].length: %u\n", i, - &sg_d[i], sg_d[i].length, sg_d[i].offset, j, - sg_s[j].length); - pr_debug("Step 1 - length: %u dst_offset: %u" - " src_offset: %u\n", length, dst_offset, - src_offset); - - if (length > req->rd_size) - length = req->rd_size; - - dst = sg_virt(&sg_d[i++]) + dst_offset; - BUG_ON(!dst); - - src = sg_virt(&sg_s[j]) + src_offset; - BUG_ON(!src); - - dst_offset = 0; - src_offset = length; - page_end = 0; - } else { - length = (sg_s[j].length - src_offset); - - pr_debug("Step 2 - sg_d[%d]: %p length: %d" - " offset: %u sg_s[%d].length: %u\n", i, - &sg_d[i], sg_d[i].length, sg_d[i].offset, - j, sg_s[j].length); - pr_debug("Step 2 - length: %u dst_offset: %u" - " src_offset: %u\n", length, dst_offset, - src_offset); - - if (length > req->rd_size) - length = req->rd_size; - - dst = sg_virt(&sg_d[i]) + dst_offset; - BUG_ON(!dst); - - if (sg_d[i].length == length) { - i++; - dst_offset = 0; - } else - dst_offset = length; - - src = sg_virt(&sg_s[j++]) + src_offset; - BUG_ON(!src); - - src_offset = 0; - page_end = 1; - } + u32 len; + void *rd_addr; - memcpy(dst, src, length); + sg_miter_next(&m); + len = min((u32)m.length, src_len); + m.consumed = len; - pr_debug("page: %u, remaining size: %u, length: %u," - " i: %u, j: %u\n", req->rd_page, - (req->rd_size - length), length, i, j); + rd_addr = sg_virt(rd_sg) + rd_offset; - req->rd_size -= length; - if (!req->rd_size) - return 0; + if (read_rd) + memcpy(m.addr, rd_addr, len); + else + memcpy(rd_addr, m.addr, len); - if (!page_end) + req->rd_size -= len; + if (!req->rd_size) continue; - if (++req->rd_page <= table->page_end_offset) { - pr_debug("page: %u in same page table\n", - req->rd_page); + src_len -= len; + if (src_len) { + rd_offset += len; continue; } - pr_debug("getting new page table for page: %u\n", - req->rd_page); - - table = rd_get_sg_table(dev, req->rd_page); - if (!table) - return -EINVAL; - - sg_s = &table->sg_table[j = 0]; - } - - return 0; -} - -/* rd_MEMCPY_write(): - * - * - */ -static int rd_MEMCPY_write(struct rd_request *req) -{ - struct se_task *task = &req->rd_task; - struct rd_dev *dev 
= req->rd_task.task_se_cmd->se_dev->dev_ptr; - struct rd_dev_sg_table *table; - struct scatterlist *sg_d, *sg_s; - void *dst, *src; - u32 i = 0, j = 0, dst_offset = 0, src_offset = 0; - u32 length, page_end = 0, table_sg_end; - u32 rd_offset = req->rd_offset; - - table = rd_get_sg_table(dev, req->rd_page); - if (!table) - return -EINVAL; - - table_sg_end = (table->page_end_offset - req->rd_page); - sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; - sg_s = task->task_sg; - - pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u," - " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, - req->rd_page, req->rd_offset); - - dst_offset = rd_offset; - - while (req->rd_size) { - if ((sg_s[i].length - src_offset) < - (sg_d[j].length - dst_offset)) { - length = (sg_s[i].length - src_offset); - - pr_debug("Step 1 - sg_s[%d]: %p length: %d" - " offset: %d sg_d[%d].length: %u\n", i, - &sg_s[i], sg_s[i].length, sg_s[i].offset, - j, sg_d[j].length); - pr_debug("Step 1 - length: %u src_offset: %u" - " dst_offset: %u\n", length, src_offset, - dst_offset); - - if (length > req->rd_size) - length = req->rd_size; - - src = sg_virt(&sg_s[i++]) + src_offset; - BUG_ON(!src); - - dst = sg_virt(&sg_d[j]) + dst_offset; - BUG_ON(!dst); - - src_offset = 0; - dst_offset = length; - page_end = 0; - } else { - length = (sg_d[j].length - dst_offset); - - pr_debug("Step 2 - sg_s[%d]: %p length: %d" - " offset: %d sg_d[%d].length: %u\n", i, - &sg_s[i], sg_s[i].length, sg_s[i].offset, - j, sg_d[j].length); - pr_debug("Step 2 - length: %u src_offset: %u" - " dst_offset: %u\n", length, src_offset, - dst_offset); - - if (length > req->rd_size) - length = req->rd_size; - - src = sg_virt(&sg_s[i]) + src_offset; - BUG_ON(!src); - - if (sg_s[i].length == length) { - i++; - src_offset = 0; - } else - src_offset = length; - - dst = sg_virt(&sg_d[j++]) + dst_offset; - BUG_ON(!dst); - - dst_offset = 0; - page_end = 1; - } - - memcpy(dst, src, length); - - pr_debug("page: %u, remaining size: %u, length: %u," - " i: %u, j: %u\n", req->rd_page, - (req->rd_size - length), length, i, j); - - req->rd_size -= length; - if (!req->rd_size) - return 0; - - if (!page_end) - continue; - - if (++req->rd_page <= table->page_end_offset) { - pr_debug("page: %u in same page table\n", - req->rd_page); + /* rd page completed, next one please */ + req->rd_page++; + rd_offset = 0; + src_len = PAGE_SIZE; + if (req->rd_page <= table->page_end_offset) { + rd_sg++; continue; } - pr_debug("getting new page table for page: %u\n", - req->rd_page); - table = rd_get_sg_table(dev, req->rd_page); - if (!table) + if (!table) { + sg_miter_stop(&m); return -EINVAL; + } - sg_d = &table->sg_table[j = 0]; + /* since we increment, the first sg entry is correct */ + rd_sg = table->sg_table; } - + sg_miter_stop(&m); return 0; } @@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task) { struct se_device *dev = task->task_se_cmd->se_dev; struct rd_request *req = RD_REQ(task); - unsigned long long lba; + u64 tmp; int ret; - req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; - lba = task->task_lba; - req->rd_offset = (do_div(lba, - (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) * - dev->se_sub_dev->se_dev_attrib.block_size; + tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; + req->rd_offset = do_div(tmp, PAGE_SIZE); + req->rd_page = tmp; req->rd_size = task->task_size; - if (task->task_data_direction == DMA_FROM_DEVICE) - ret = rd_MEMCPY_read(req); - else - ret = 
rd_MEMCPY_write(req); - + ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE); if (ret != 0) return ret; task->task_scsi_status = GOOD; transport_complete_task(task, 1); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; + return 0; } /* rd_free_task(): (Part of se_subsystem_api_t template) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 217e29df6297..684522805a1f 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list( " %d t_fe_count: %d\n", (preempt_and_abort_list) ? "Preempt" : "", cmd, cmd->t_state, atomic_read(&cmd->t_fe_count)); - /* - * Signal that the command has failed via cmd->se_cmd_flags, - */ - transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, atomic_read(&cmd->t_fe_count)); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3400ae6e93f8..0257658e2e3e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -61,7 +61,6 @@ static int sub_api_initialized; static struct workqueue_struct *target_completion_wq; -static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; struct kmem_cache *se_ua_cache; @@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_generic_request_failure(struct se_cmd *, int, int); +static void transport_generic_request_failure(struct se_cmd *); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) { - se_cmd_cache = kmem_cache_create("se_cmd_cache", - sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); - if (!se_cmd_cache) { - pr_err("kmem_cache_create for struct se_cmd failed\n"); - goto out; - } se_tmr_req_cache = kmem_cache_create("se_tmr_cache", sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 0, NULL); if (!se_tmr_req_cache) { pr_err("kmem_cache_create() for struct se_tmr_req" " failed\n"); - goto out_free_cmd_cache; + goto out; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), @@ -182,8 +175,6 @@ out_free_sess_cache: kmem_cache_destroy(se_sess_cache); out_free_tmr_req_cache: kmem_cache_destroy(se_tmr_req_cache); -out_free_cmd_cache: - kmem_cache_destroy(se_cmd_cache); out: return -ENOMEM; } @@ -191,7 +182,6 @@ out: void release_se_kmem_caches(void) { destroy_workqueue(target_completion_wq); - kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); @@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) task->task_scsi_status = GOOD; } else { task->task_scsi_status = SAM_STAT_CHECK_CONDITION; - task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; - task->task_se_cmd->transport_error_status = - PYX_TRANSPORT_ILLEGAL_REQUEST; + task->task_se_cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } transport_complete_task(task, good); @@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); - transport_generic_request_failure(cmd, 1, 1); + 
transport_generic_request_failure(cmd); } /* transport_complete_task(): @@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success) if (cmd->t_tasks_failed) { if (!task->task_error_status) { task->task_error_status = - PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - cmd->transport_error_status = - PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } + INIT_WORK(&cmd->work, target_complete_failure_work); } else { atomic_set(&cmd->t_transport_complete, 1); @@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba( dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; - atomic_set(&dev->active_cmds, 0); INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); INIT_LIST_HEAD(&dev->execute_task_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); - INIT_LIST_HEAD(&dev->ordered_cmd_list); INIT_LIST_HEAD(&dev->state_task_list); INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); - spin_lock_init(&dev->ordered_cmd_lock); - spin_lock_init(&dev->state_task_lock); - spin_lock_init(&dev->dev_alua_lock); spin_lock_init(&dev->dev_reservation_lock); spin_lock_init(&dev->dev_status_lock); - spin_lock_init(&dev->dev_status_thr_lock); spin_lock_init(&dev->se_port_lock); spin_lock_init(&dev->se_tmr_lock); spin_lock_init(&dev->qf_cmd_lock); @@ -1507,7 +1492,6 @@ void transport_init_se_cmd( { INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); - INIT_LIST_HEAD(&cmd->se_ordered_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->se_cmd_list); @@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks( pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; return -EINVAL; } /* @@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks( " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), (unsigned long)sizeof(cmd->__t_task_cdb)); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return -ENOMEM; } } else @@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct( * and call transport_generic_request_failure() if necessary.. */ ret = transport_generic_new_cmd(cmd); - if (ret < 0) { - cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, 0, - (cmd->data_direction != DMA_TO_DEVICE)); - } + if (ret < 0) + transport_generic_request_failure(cmd); + return 0; } EXPORT_SYMBOL(transport_handle_cdb_direct); @@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) /* * Handle SAM-esque emulation for generic transport request failures. 
*/ -static void transport_generic_request_failure( - struct se_cmd *cmd, - int complete, - int sc) +static void transport_generic_request_failure(struct se_cmd *cmd) { int ret = 0; pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->t_task_cdb[0]); - pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", + pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, - cmd->transport_error_status); + cmd->t_state, cmd->scsi_sense_reason); pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" @@ -1829,46 +1812,19 @@ static void transport_generic_request_failure( if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); - if (complete) { - cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; - } - - switch (cmd->transport_error_status) { - case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE: - cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - break; - case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS: - cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; - break; - case PYX_TRANSPORT_INVALID_CDB_FIELD: - cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - break; - case PYX_TRANSPORT_INVALID_PARAMETER_LIST: - cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; - break; - case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES: - if (!sc) - transport_new_cmd_failure(cmd); - /* - * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES, - * we force this session to fall back to session - * recovery. - */ - cmd->se_tfo->fall_back_to_erl0(cmd->se_sess); - cmd->se_tfo->stop_session(cmd->se_sess, 0, 0); - - goto check_stop; - case PYX_TRANSPORT_LU_COMM_FAILURE: - case PYX_TRANSPORT_ILLEGAL_REQUEST: - cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - break; - case PYX_TRANSPORT_UNKNOWN_MODE_PAGE: - cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE; - break; - case PYX_TRANSPORT_WRITE_PROTECTED: - cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + switch (cmd->scsi_sense_reason) { + case TCM_NON_EXISTENT_LUN: + case TCM_UNSUPPORTED_SCSI_OPCODE: + case TCM_INVALID_CDB_FIELD: + case TCM_INVALID_PARAMETER_LIST: + case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: + case TCM_UNKNOWN_MODE_PAGE: + case TCM_WRITE_PROTECTED: + case TCM_CHECK_CONDITION_ABORT_CMD: + case TCM_CHECK_CONDITION_UNIT_ATTENTION: + case TCM_CHECK_CONDITION_NOT_READY: break; - case PYX_TRANSPORT_RESERVATION_CONFLICT: + case TCM_RESERVATION_CONFLICT: /* * No SENSE Data payload for this case, set SCSI Status * and queue the response to $FABRIC_MOD. @@ -1893,15 +1849,9 @@ static void transport_generic_request_failure( if (ret == -EAGAIN || ret == -ENOMEM) goto queue_full; goto check_stop; - case PYX_TRANSPORT_USE_SENSE_REASON: - /* - * struct se_cmd->scsi_sense_reason already set - */ - break; default: pr_err("Unknown transport error for CDB 0x%02x: %d\n", - cmd->t_task_cdb[0], - cmd->transport_error_status); + cmd->t_task_cdb[0], cmd->scsi_sense_reason); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; } @@ -1912,14 +1862,10 @@ static void transport_generic_request_failure( * transport_send_check_condition_and_sense() after handling * possible unsoliticied write data payloads. 
*/ - if (!sc && !cmd->se_tfo->new_cmd_map) - transport_new_cmd_failure(cmd); - else { - ret = transport_send_check_condition_and_sense(cmd, - cmd->scsi_sense_reason, 0); - if (ret == -EAGAIN || ret == -ENOMEM) - goto queue_full; - } + ret = transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; check_stop: transport_lun_remove_cmd(cmd); @@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * to allow the passed struct se_cmd list of tasks to the front of the list. */ if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_inc(&cmd->se_dev->dev_hoq_count); - smp_mb__after_atomic_inc(); pr_debug("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", cmd->t_task_cdb[0], cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&cmd->se_dev->ordered_cmd_lock); - list_add_tail(&cmd->se_ordered_node, - &cmd->se_dev->ordered_cmd_list); - spin_unlock(&cmd->se_dev->ordered_cmd_lock); - atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); @@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; - if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { - cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, 0, 1); + if (se_dev_check_online(cmd->se_dev) != 0) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + transport_generic_request_failure(cmd); return 0; } @@ -2163,14 +2102,13 @@ check_depth: else error = dev->transport->do_task(task); if (error != 0) { - cmd->transport_error_status = error; spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags &= ~TF_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); atomic_inc(&dev->depth_left); - transport_generic_request_failure(cmd, 0, 1); + transport_generic_request_failure(cmd); } goto check_depth; @@ -2178,19 +2116,6 @@ check_depth: return 0; } -void transport_new_cmd_failure(struct se_cmd *se_cmd) -{ - unsigned long flags; - /* - * Any unsolicited data will get dumped for failed command inside of - * the fabric plugin - */ - spin_lock_irqsave(&se_cmd->t_state_lock, flags); - se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); -} - static inline u32 transport_get_sectors_6( unsigned char *cdb, struct se_cmd *cmd, @@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6( /* * Everything else assume TYPE_DISK Sector CDB location. - * Use 8-bit sector value. + * Use 8-bit sector value. SBC-3 says: + * + * A TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be written. Any other value + * specifies the number of logical blocks that shall be + * written. */ type_disk: - return (u32)cdb[4]; + return cdb[4] ? : 256; } static inline u32 transport_get_sectors_10( @@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) return -1; } -static int -transport_handle_reservation_conflict(struct se_cmd *cmd) -{ - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; - /* - * For UA Interlock Code 11b, a RESERVATION CONFLICT will - * establish a UNIT ATTENTION with PREVIOUS RESERVATION - * CONFLICT STATUS. 
- * - * See spc4r17, section 7.4.6 Control Mode Page, Table 349 - */ - if (cmd->se_sess && - cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, - cmd->orig_fe_lun, 0x2C, - ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - return -EINVAL; -} - static inline long long transport_dev_end_lba(struct se_device *dev) { return dev->transport->get_blocks(dev) + 1; @@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer( */ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) { if (su_dev->t10_pr.pr_ops.t10_seq_non_holder( - cmd, cdb, pr_reg_type) != 0) - return transport_handle_reservation_conflict(cmd); + cmd, cdb, pr_reg_type) != 0) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT; + return -EBUSY; + } /* * This means the CDB is allowed for the SCSI Initiator port * when said port is *NOT* holding the legacy SPC-2 or @@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_32(cdb); - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -2667,7 +2581,8 @@ goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_32(cdb); - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -2676,12 +2591,13 @@ goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_64(cdb); - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(cmd->t_tasks_bidi)) + !(cmd->se_cmd_flags & SCF_BIDI)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_32(cdb); @@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run after I/O completion. */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_tasks_fua = (cdb[1] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); @@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer( * completion. 
*/ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_tasks_fua = (cdb[10] & 0x8); + if (cdb[1] & 0x8) + cmd->se_cmd_flags |= SCF_FUA; break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); @@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd) " SIMPLE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_dec(&dev->dev_hoq_count); - smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for" " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&dev->ordered_cmd_lock); - list_del(&cmd->se_ordered_node); atomic_dec(&dev->dev_ordered_sync); smp_mb__after_atomic_dec(); - spin_unlock(&dev->ordered_cmd_lock); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" @@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd( if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { + /* + * Reject SCSI data overflow with map_mem_to_cmd() as incoming + * scatterlists already have been set to follow what the fabric + * passes for the original expected data transfer length. + */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + pr_warn("Rejecting SCSI DATA overflow for fabric using" + " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + return -EINVAL; + } cmd->t_data_sg = sgl; cmd->t_data_nents = sgl_count; @@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd) cmd->data_length) { ret = transport_generic_get_mem(cmd); if (ret < 0) - return ret; + goto out_fail; } /* @@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd) task_cdbs = transport_allocate_control_task(cmd); } - if (task_cdbs <= 0) + if (task_cdbs < 0) goto out_fail; + else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { + cmd->t_state = TRANSPORT_COMPLETE; + atomic_set(&cmd->t_transport_active, 1); + INIT_WORK(&cmd->work, target_complete_ok_work); + queue_work(target_completion_wq, &cmd->work); + return 0; + } if (set_counts) { atomic_inc(&cmd->t_fe_count); @@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) else if (ret < 0) return ret; - return PYX_TRANSPORT_WRITE_PENDING; + return 1; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); @@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->se_tfo->write_pending_status(cmd) != 0) { atomic_inc(&cmd->t_transport_aborted); smp_mb__after_atomic_inc(); - cmd->scsi_status = SAM_STAT_TASK_ABORTED; - transport_new_cmd_failure(cmd); - return; } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; @@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param) struct se_cmd *cmd; struct se_device *dev = (struct se_device *) param; - set_user_nice(current, -20); - while (!kthread_should_stop()) { ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, atomic_read(&dev->dev_queue_obj.queue_cnt) || @@ -4698,18 +4625,13 @@ get_cmd: } ret = cmd->se_tfo->new_cmd_map(cmd); if (ret < 0) { - cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, - 0, (cmd->data_direction != - DMA_TO_DEVICE)); + transport_generic_request_failure(cmd); break; } ret = transport_generic_new_cmd(cmd); if (ret < 0) { - cmd->transport_error_status = 
ret; - transport_generic_request_failure(cmd, - 0, (cmd->data_direction != - DMA_TO_DEVICE)); + transport_generic_request_failure(cmd); + break; } break; case TRANSPORT_PROCESS_WRITE: diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 4fac37c4c615..71fc9cea5dc9 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd) lport = ep->lp; fp = fc_frame_alloc(lport, sizeof(*txrdy)); if (!fp) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return -ENOMEM; /* Signal QUEUE_FULL */ txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); memset(txrdy, 0, sizeof(*txrdy)); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 5f770412ca40..9402b7387cac 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn) struct ft_lport_acl *lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); - pr_debug("del lport %s\n", - config_item_name(&wwn->wwn_group.cg_item)); + pr_debug("del lport %s\n", lacl->name); mutex_lock(&ft_lport_lock); list_del(&lacl->list); mutex_unlock(&ft_lport_lock); diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c index 435f6facbc23..44fbebab5075 100644 --- a/drivers/tty/hvc/hvc_dcc.c +++ b/drivers/tty/hvc/hvc_dcc.c @@ -46,6 +46,7 @@ static inline char __dcc_getchar(void) asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg" : "=r" (__c)); + isb(); return __c; } @@ -55,6 +56,7 @@ static inline void __dcc_putchar(char c) asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char" : /* no output register */ : "r" (c)); + isb(); } static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 4cb0d0a3e57b..fc7bbba585ce 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -66,14 +66,16 @@ static int debug; module_param(debug, int, 0600); -#define T1 (HZ/10) -#define T2 (HZ/3) -#define N2 3 +/* Defaults: these are from the specification */ + +#define T1 10 /* 100mS */ +#define T2 34 /* 333mS */ +#define N2 3 /* Retry 3 times */ /* Use long timers for testing at low speed with debug on */ #ifdef DEBUG_TIMING -#define T1 HZ -#define T2 (2 * HZ) +#define T1 100 +#define T2 200 #endif /* diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 5f479dada6f2..925a1e547a83 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1560,7 +1560,7 @@ config SERIAL_IFX6X60 Support for the IFX6x60 modem devices on Intel MID platforms. config SERIAL_PCH_UART - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223) UART" + tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) UART" depends on PCI select SERIAL_CORE help @@ -1568,12 +1568,12 @@ config SERIAL_PCH_UART which is an IOH(Input/Output Hub) for x86 embedded processor. Enabling PCH_DMA, this PCH UART works as DMA mode. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7213 and ML7223. - ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is - for MP(Media Phone) use. - ML7213/ML7223 is companion chip for Intel Atom E6xx series. - ML7213/ML7223 is completely compatible for Intel EG20T PCH. + This driver also can be used for LAPIS Semiconductor IOH(Input/ + Output Hub), ML7213, ML7223 and ML7831. 
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is + for MP(Media Phone) use and ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. config SERIAL_MSM_SMD bool "Enable tty device interface for some SMD ports" diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 4a0f86fa1e90..4c823f341d98 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -228,7 +228,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) if (rs485conf->flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; - if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) + if ((rs485conf->delay_rts_after_send) > 0) UART_PUT_TTGR(port, rs485conf->delay_rts_after_send); mode |= ATMEL_US_USMODE_RS485; } else { @@ -304,7 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) if (atmel_port->rs485.flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); - if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + if ((atmel_port->rs485.delay_rts_after_send) > 0) UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_after_send); mode |= ATMEL_US_USMODE_RS485; @@ -1228,7 +1228,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, if (atmel_port->rs485.flags & SER_RS485_ENABLED) { dev_dbg(port->dev, "Setting UART to RS485\n"); - if (atmel_port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + if ((atmel_port->rs485.delay_rts_after_send) > 0) UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_after_send); mode |= ATMEL_US_USMODE_RS485; @@ -1447,16 +1447,6 @@ static void __devinit atmel_of_init_port(struct atmel_uart_port *atmel_port, rs485conf->delay_rts_after_send = rs485_delay[1]; rs485conf->flags = 0; - if (rs485conf->delay_rts_before_send == 0 && - rs485conf->delay_rts_after_send == 0) { - rs485conf->flags |= SER_RS485_RTS_ON_SEND; - } else { - if (rs485conf->delay_rts_before_send) - rs485conf->flags |= SER_RS485_RTS_BEFORE_SEND; - if (rs485conf->delay_rts_after_send) - rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; - } - if (of_get_property(np, "rs485-rx-during-tx", NULL)) rs485conf->flags |= SER_RS485_RX_DURING_TX; diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c index b7435043f2fe..1dfba7b779c8 100644 --- a/drivers/tty/serial/crisv10.c +++ b/drivers/tty/serial/crisv10.c @@ -3234,9 +3234,8 @@ rs_write(struct tty_struct *tty, e100_disable_rx(info); e100_enable_rx_irq(info); #endif - if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) && - (info->rs485.delay_rts_before_send > 0)) - msleep(info->rs485.delay_rts_before_send); + if (info->rs485.delay_rts_before_send > 0) + msleep(info->rs485.delay_rts_before_send); } #endif /* CONFIG_ETRAX_RS485 */ @@ -3693,10 +3692,6 @@ rs_ioctl(struct tty_struct *tty, rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; rs485data.flags = 0; - if (rs485data.delay_rts_before_send != 0) - rs485data.flags |= SER_RS485_RTS_BEFORE_SEND; - else - rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND); if (rs485ctrl.enabled) rs485data.flags |= SER_RS485_ENABLED; @@ -4531,7 +4526,6 @@ static int __init rs_init(void) /* Set sane defaults */ info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; - info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND); info->rs485.delay_rts_before_send = 0; 
info->rs485.flags &= ~(SER_RS485_ENABLED); #endif diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c index 286c386d9c46..e272d3919c67 100644 --- a/drivers/tty/serial/mfd.c +++ b/drivers/tty/serial/mfd.c @@ -884,7 +884,6 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, { struct uart_hsu_port *up = container_of(port, struct uart_hsu_port, port); - struct tty_struct *tty = port->state->port.tty; unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; @@ -907,8 +906,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, } /* CMSPAR isn't supported by this driver */ - if (tty) - tty->termios->c_cflag &= ~CMSPAR; + termios->c_cflag &= ~CMSPAR; if (termios->c_cflag & CSTOPB) cval |= UART_LCR_STOP; diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 21febef926aa..d6aba8c087e4 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -1,5 +1,5 @@ /* - *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. + *Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. * *This program is free software; you can redistribute it and/or modify *it under the terms of the GNU General Public License as published by @@ -46,8 +46,8 @@ enum { /* Set the max number of UART port * Intel EG20T PCH: 4 port - * OKI SEMICONDUCTOR ML7213 IOH: 3 port - * OKI SEMICONDUCTOR ML7223 IOH: 2 port + * LAPIS Semiconductor ML7213 IOH: 3 port + * LAPIS Semiconductor ML7223 IOH: 2 port */ #define PCH_UART_NR 4 @@ -258,6 +258,8 @@ enum pch_uart_num_t { pch_ml7213_uart2, pch_ml7223_uart0, pch_ml7223_uart1, + pch_ml7831_uart0, + pch_ml7831_uart1, }; static struct pch_uart_driver_data drv_dat[] = { @@ -270,6 +272,8 @@ static struct pch_uart_driver_data drv_dat[] = { [pch_ml7213_uart2] = {PCH_UART_2LINE, 2}, [pch_ml7223_uart0] = {PCH_UART_8LINE, 0}, [pch_ml7223_uart1] = {PCH_UART_2LINE, 1}, + [pch_ml7831_uart0] = {PCH_UART_8LINE, 0}, + [pch_ml7831_uart1] = {PCH_UART_2LINE, 1}, }; static unsigned int default_baud = 9600; @@ -628,6 +632,7 @@ static void pch_request_dma(struct uart_port *port) dev_err(priv->port.dev, "%s:dma_request_channel FAILS(Rx)\n", __func__); dma_release_channel(priv->chan_tx); + priv->chan_tx = NULL; return; } @@ -1215,8 +1220,7 @@ static void pch_uart_shutdown(struct uart_port *port) dev_err(priv->port.dev, "pch_uart_hal_set_fifo Failed(ret=%d)\n", ret); - if (priv->use_dma_flag) - pch_free_dma(port); + pch_free_dma(port); free_irq(priv->port.irq, priv); } @@ -1280,6 +1284,7 @@ static void pch_uart_set_termios(struct uart_port *port, if (rtn) goto out; + pch_uart_set_mctrl(&priv->port, priv->port.mctrl); /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); @@ -1552,6 +1557,10 @@ static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = { .driver_data = pch_ml7223_uart0}, {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x800D), .driver_data = pch_ml7223_uart1}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8811), + .driver_data = pch_ml7831_uart0}, + {PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8812), + .driver_data = pch_ml7831_uart1}, {0,}, }; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 1945c70539c2..aff9d612dff0 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -207,6 +207,25 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { }, /* + * Common SH-2(A) SCIF definitions for ports with FIFO data + * count registers. 
+ */ + [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + }, + + /* * Common SH-3 SCIF definitions. */ [SCIx_SH3_SCIF_REGTYPE] = { diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 512c49f98e85..8e0924f55446 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -36,6 +36,7 @@ #include <linux/kmod.h> #include <linux/nsproxy.h> +#include <linux/ratelimit.h> /* * This guards the refcounted line discipline lists. The lock @@ -547,15 +548,16 @@ static void tty_ldisc_flush_works(struct tty_struct *tty) /** * tty_ldisc_wait_idle - wait for the ldisc to become idle * @tty: tty to wait for + * @timeout: for how long to wait at most * * Wait for the line discipline to become idle. The discipline must * have been halted for this to guarantee it remains idle. */ -static int tty_ldisc_wait_idle(struct tty_struct *tty) +static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout) { - int ret; + long ret; ret = wait_event_timeout(tty_ldisc_idle, - atomic_read(&tty->ldisc->users) == 1, 5 * HZ); + atomic_read(&tty->ldisc->users) == 1, timeout); if (ret < 0) return ret; return ret > 0 ? 0 : -EBUSY; @@ -665,7 +667,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty_ldisc_flush_works(tty); - retval = tty_ldisc_wait_idle(tty); + retval = tty_ldisc_wait_idle(tty, 5 * HZ); tty_lock(); mutex_lock(&tty->ldisc_mutex); @@ -762,8 +764,6 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc) if (IS_ERR(ld)) return -1; - WARN_ON_ONCE(tty_ldisc_wait_idle(tty)); - tty_ldisc_close(tty, tty->ldisc); tty_ldisc_put(tty->ldisc); tty->ldisc = NULL; @@ -838,7 +838,7 @@ void tty_ldisc_hangup(struct tty_struct *tty) tty_unlock(); cancel_work_sync(&tty->buf.work); mutex_unlock(&tty->ldisc_mutex); - +retry: tty_lock(); mutex_lock(&tty->ldisc_mutex); @@ -847,6 +847,22 @@ void tty_ldisc_hangup(struct tty_struct *tty) it means auditing a lot of other paths so this is a FIXME */ if (tty->ldisc) { /* Not yet closed */ + if (atomic_read(&tty->ldisc->users) != 1) { + char cur_n[TASK_COMM_LEN], tty_n[64]; + long timeout = 3 * HZ; + tty_unlock(); + + while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) { + timeout = MAX_SCHEDULE_TIMEOUT; + printk_ratelimited(KERN_WARNING + "%s: waiting (%s) for %s took too long, but we keep waiting...\n", + __func__, get_task_comm(cur_n, current), + tty_name(tty, tty_n)); + } + mutex_unlock(&tty->ldisc_mutex); + goto retry; + } + if (reset == 0) { if (!tty_ldisc_reinit(tty, tty->termios->c_line)) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 6960715c5063..e8c564a53346 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -539,7 +539,6 @@ static void acm_port_down(struct acm *acm) { int i; - mutex_lock(&open_mutex); if (acm->dev) { usb_autopm_get_interface(acm->control); acm_set_control(acm, acm->ctrlout = 0); @@ -551,14 +550,15 @@ static void acm_port_down(struct acm *acm) acm->control->needs_remote_wakeup = 0; usb_autopm_put_interface(acm->control); } - mutex_unlock(&open_mutex); } static void acm_tty_hangup(struct tty_struct *tty) { struct acm *acm = tty->driver_data; tty_port_hangup(&acm->port); + mutex_lock(&open_mutex); acm_port_down(acm); + mutex_unlock(&open_mutex); } static void 
acm_tty_close(struct tty_struct *tty, struct file *filp) @@ -569,8 +569,9 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) shutdown */ if (!acm) return; + + mutex_lock(&open_mutex); if (tty_port_close_start(&acm->port, tty, filp) == 0) { - mutex_lock(&open_mutex); if (!acm->dev) { tty_port_tty_set(&acm->port, NULL); acm_tty_unregister(acm); @@ -582,6 +583,7 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp) acm_port_down(acm); tty_port_close_end(&acm->port, tty); tty_port_tty_set(&acm->port, NULL); + mutex_unlock(&open_mutex); } static int acm_tty_write(struct tty_struct *tty, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 96f05b29c9ad..79781461eec9 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -813,6 +813,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) USB_PORT_FEAT_C_PORT_LINK_STATE); } + if ((portchange & USB_PORT_STAT_C_BH_RESET) && + hub_is_superspeed(hub->hdev)) { + need_debounce_delay = true; + clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_BH_PORT_RESET); + } /* We can forget about a "removed" device when there's a * physical disconnect or the connect status changes. */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d6a8d8269bfb..ecf12e15a7ef 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -50,15 +50,42 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech Webcam B/C500 */ { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Webcam C600 */ + { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Webcam Pro 9000 */ { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Webcam C905 */ + { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech Webcam C210 */ + { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech Webcam C260 */ + { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Webcam C310 */ { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Webcam C910 */ + { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech Webcam C160 */ + { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Quickcam Pro 9000 */ + { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech Quickcam E3500 */ + { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Logitech Quickcam Vision Pro */ + { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Harmony 700-series */ { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index fa824cfdd2eb..25dbd8614e72 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1284,6 +1284,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc) int ret; dep->endpoint.maxpacket = 1024; + dep->endpoint.max_streams = 15; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, &dwc->gadget.ep_list); diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index b21cd376c11a..23a447373c51 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -469,7 +469,7 @@ 
config USB_LANGWELL gadget drivers to also be dynamically linked. config USB_EG20T - tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC" + tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7831) UDC" depends on PCI select USB_GADGET_DUALSPEED help @@ -485,10 +485,11 @@ config USB_EG20T This driver dose not support interrupt transfer or isochronous transfer modes. - This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is + This driver also can be used for LAPIS Semiconductor's ML7213 which is for IVI(In-Vehicle Infotainment) use. - ML7213 is companion chip for Intel Atom E6xx series. - ML7213 is completely compatible for Intel EG20T PCH. + ML7831 is for general purpose use. + ML7213/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7831 is completely compatible for Intel EG20T PCH. config USB_CI13XXX_MSM tristate "MIPS USB CI13xxx for MSM" diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c index 4730016d7cd4..45f422ac103f 100644 --- a/drivers/usb/gadget/amd5536udc.c +++ b/drivers/usb/gadget/amd5536udc.c @@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver, u32 tmp; if (!driver || !bind || !driver->setup - || driver->speed != USB_SPEED_HIGH) + || driver->speed < USB_SPEED_HIGH) return -EINVAL; if (!dev) return -ENODEV; diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c index 4eedfe557154..1fc612914c52 100644 --- a/drivers/usb/gadget/ci13xxx_msm.c +++ b/drivers/usb/gadget/ci13xxx_msm.c @@ -122,3 +122,5 @@ static int __init ci13xxx_msm_init(void) return platform_driver_register(&ci13xxx_msm_driver); } module_init(ci13xxx_msm_init); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c index 83428f56253b..9a0c3979ff43 100644 --- a/drivers/usb/gadget/ci13xxx_udc.c +++ b/drivers/usb/gadget/ci13xxx_udc.c @@ -71,6 +71,9 @@ /****************************************************************************** * DEFINE *****************************************************************************/ + +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + /* ctrl register bank access */ static DEFINE_SPINLOCK(udc_lock); @@ -1434,7 +1437,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) return -EALREADY; mReq->req.status = -EALREADY; - if (length && !mReq->req.dma) { + if (length && mReq->req.dma == DMA_ADDR_INVALID) { mReq->req.dma = \ dma_map_single(mEp->device, mReq->req.buf, length, mEp->dir ? DMA_TO_DEVICE : @@ -1453,7 +1456,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) dma_unmap_single(mEp->device, mReq->req.dma, length, mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - mReq->req.dma = 0; + mReq->req.dma = DMA_ADDR_INVALID; mReq->map = 0; } return -ENOMEM; @@ -1549,7 +1552,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) if (mReq->map) { dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, mEp->dir ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); - mReq->req.dma = 0; + mReq->req.dma = DMA_ADDR_INVALID; mReq->map = 0; } @@ -1610,7 +1613,6 @@ __acquires(mEp->lock) * @gadget: gadget * * This function returns an error code - * Caller must hold lock */ static int _gadget_stop_activity(struct usb_gadget *gadget) { @@ -2189,6 +2191,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); if (mReq != NULL) { INIT_LIST_HEAD(&mReq->queue); + mReq->req.dma = DMA_ADDR_INVALID; mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, &mReq->dma); @@ -2328,7 +2331,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) if (mReq->map) { dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length, mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - mReq->req.dma = 0; + mReq->req.dma = DMA_ADDR_INVALID; mReq->map = 0; } req->status = -ECONNRESET; @@ -2500,12 +2503,12 @@ static int ci13xxx_wakeup(struct usb_gadget *_gadget) spin_lock_irqsave(udc->lock, flags); if (!udc->remote_wakeup) { ret = -EOPNOTSUPP; - dbg_trace("remote wakeup feature is not enabled\n"); + trace("remote wakeup feature is not enabled\n"); goto out; } if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) { ret = -EINVAL; - dbg_trace("port is not suspended\n"); + trace("port is not suspended\n"); goto out; } hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR); @@ -2703,7 +2706,9 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver) if (udc->udc_driver->notify_event) udc->udc_driver->notify_event(udc, CI13XXX_CONTROLLER_STOPPED_EVENT); + spin_unlock_irqrestore(udc->lock, flags); _gadget_stop_activity(&udc->gadget); + spin_lock_irqsave(udc->lock, flags); pm_runtime_put(&udc->gadget.dev); } @@ -2850,7 +2855,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev, struct ci13xxx *udc; int retval = 0; - trace("%p, %p, %p", dev, regs, name); + trace("%p, %p, %p", dev, regs, driver->name); if (dev == NULL || regs == NULL || driver == NULL || driver->name == NULL) diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 52583a235330..c39d58860fa0 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -624,7 +624,8 @@ static int fsg_setup(struct usb_function *f, if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; - if (w_index != fsg->interface_number || w_value != 0) + if (w_index != fsg->interface_number || w_value != 0 || + w_length != 0) return -EDOM; /* @@ -639,7 +640,8 @@ static int fsg_setup(struct usb_function *f, if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; - if (w_index != fsg->interface_number || w_value != 0) + if (w_index != fsg->interface_number || w_value != 0 || + w_length != 1) return -EDOM; VDBG(fsg, "get max LUN\n"); *(u8 *)req->buf = fsg->common->nluns - 1; diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c index 67b222908cf9..3797b3d6c622 100644 --- a/drivers/usb/gadget/f_midi.c +++ b/drivers/usb/gadget/f_midi.c @@ -95,7 +95,6 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req); DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); -DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(16); DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); /* B.3.1 Standard AC Interface Descriptor */ @@ -140,26 +139,6 @@ static struct usb_ms_header_descriptor ms_header_desc __initdata = { /* .wTotalLength = DYNAMIC */ }; -/* B.4.3 Embedded MIDI IN Jack Descriptor 
*/ -static struct usb_midi_in_jack_descriptor jack_in_emb_desc = { - .bLength = USB_DT_MIDI_IN_SIZE, - .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = USB_MS_MIDI_IN_JACK, - .bJackType = USB_MS_EMBEDDED, - /* .bJackID = DYNAMIC */ -}; - -/* B.4.4 Embedded MIDI OUT Jack Descriptor */ -static struct usb_midi_out_jack_descriptor_16 jack_out_emb_desc = { - /* .bLength = DYNAMIC */ - .bDescriptorType = USB_DT_CS_INTERFACE, - .bDescriptorSubtype = USB_MS_MIDI_OUT_JACK, - .bJackType = USB_MS_EMBEDDED, - /* .bJackID = DYNAMIC */ - /* .bNrInputPins = DYNAMIC */ - /* .pins = DYNAMIC */ -}; - /* B.5.1 Standard Bulk OUT Endpoint Descriptor */ static struct usb_endpoint_descriptor bulk_out_desc = { .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, @@ -758,9 +737,11 @@ fail: static int __init f_midi_bind(struct usb_configuration *c, struct usb_function *f) { - struct usb_descriptor_header *midi_function[(MAX_PORTS * 2) + 12]; + struct usb_descriptor_header **midi_function; struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; + struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS]; struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; + struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS]; struct usb_composite_dev *cdev = c->cdev; struct f_midi *midi = func_to_midi(f); int status, n, jack = 1, i = 0; @@ -798,6 +779,14 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f) goto fail; midi->out_ep->driver_data = cdev; /* claim */ + /* allocate temporary function list */ + midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(midi_function), + GFP_KERNEL); + if (!midi_function) { + status = -ENOMEM; + goto fail; + } + /* * construct the function's descriptor set. As the number of * input and output MIDI ports is configurable, we have to do @@ -811,73 +800,74 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f) /* calculate the header's wTotalLength */ n = USB_DT_MS_HEADER_SIZE - + (1 + midi->in_ports) * USB_DT_MIDI_IN_SIZE - + (1 + midi->out_ports) * USB_DT_MIDI_OUT_SIZE(1); + + (midi->in_ports + midi->out_ports) * + (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1)); ms_header_desc.wTotalLength = cpu_to_le16(n); midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; - /* we have one embedded IN jack */ - jack_in_emb_desc.bJackID = jack++; - midi_function[i++] = (struct usb_descriptor_header *) &jack_in_emb_desc; - - /* and a dynamic amount of external IN jacks */ - for (n = 0; n < midi->in_ports; n++) { - struct usb_midi_in_jack_descriptor *ext = &jack_in_ext_desc[n]; - - ext->bLength = USB_DT_MIDI_IN_SIZE; - ext->bDescriptorType = USB_DT_CS_INTERFACE; - ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; - ext->bJackType = USB_MS_EXTERNAL; - ext->bJackID = jack++; - ext->iJack = 0; - - midi_function[i++] = (struct usb_descriptor_header *) ext; - } - - /* one embedded OUT jack ... */ - jack_out_emb_desc.bLength = USB_DT_MIDI_OUT_SIZE(midi->in_ports); - jack_out_emb_desc.bJackID = jack++; - jack_out_emb_desc.bNrInputPins = midi->in_ports; - /* ... 
which referencess all external IN jacks */ + /* configure the external IN jacks, each linked to an embedded OUT jack */ for (n = 0; n < midi->in_ports; n++) { - jack_out_emb_desc.pins[n].baSourceID = jack_in_ext_desc[n].bJackID; - jack_out_emb_desc.pins[n].baSourcePin = 1; + struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n]; + struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n]; + + in_ext->bLength = USB_DT_MIDI_IN_SIZE; + in_ext->bDescriptorType = USB_DT_CS_INTERFACE; + in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; + in_ext->bJackType = USB_MS_EXTERNAL; + in_ext->bJackID = jack++; + in_ext->iJack = 0; + midi_function[i++] = (struct usb_descriptor_header *) in_ext; + + out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1); + out_emb->bDescriptorType = USB_DT_CS_INTERFACE; + out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; + out_emb->bJackType = USB_MS_EMBEDDED; + out_emb->bJackID = jack++; + out_emb->bNrInputPins = 1; + out_emb->pins[0].baSourcePin = 1; + out_emb->pins[0].baSourceID = in_ext->bJackID; + out_emb->iJack = 0; + midi_function[i++] = (struct usb_descriptor_header *) out_emb; + + /* link it to the endpoint */ + ms_in_desc.baAssocJackID[n] = out_emb->bJackID; } - midi_function[i++] = (struct usb_descriptor_header *) &jack_out_emb_desc; - - /* and multiple external OUT jacks ... */ + /* configure the external OUT jacks, each linked to an embedded IN jack */ for (n = 0; n < midi->out_ports; n++) { - struct usb_midi_out_jack_descriptor_1 *ext = &jack_out_ext_desc[n]; - int m; - - ext->bLength = USB_DT_MIDI_OUT_SIZE(1); - ext->bDescriptorType = USB_DT_CS_INTERFACE; - ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; - ext->bJackType = USB_MS_EXTERNAL; - ext->bJackID = jack++; - ext->bNrInputPins = 1; - ext->iJack = 0; - /* ... which all reference the same embedded IN jack */ - for (m = 0; m < midi->out_ports; m++) { - ext->pins[m].baSourceID = jack_in_emb_desc.bJackID; - ext->pins[m].baSourcePin = 1; - } - - midi_function[i++] = (struct usb_descriptor_header *) ext; + struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n]; + struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n]; + + in_emb->bLength = USB_DT_MIDI_IN_SIZE; + in_emb->bDescriptorType = USB_DT_CS_INTERFACE; + in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; + in_emb->bJackType = USB_MS_EMBEDDED; + in_emb->bJackID = jack++; + in_emb->iJack = 0; + midi_function[i++] = (struct usb_descriptor_header *) in_emb; + + out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1); + out_ext->bDescriptorType = USB_DT_CS_INTERFACE; + out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; + out_ext->bJackType = USB_MS_EXTERNAL; + out_ext->bJackID = jack++; + out_ext->bNrInputPins = 1; + out_ext->iJack = 0; + out_ext->pins[0].baSourceID = in_emb->bJackID; + out_ext->pins[0].baSourcePin = 1; + midi_function[i++] = (struct usb_descriptor_header *) out_ext; + + /* link it to the endpoint */ + ms_out_desc.baAssocJackID[n] = in_emb->bJackID; } /* configure the endpoint descriptors ... */ ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); ms_out_desc.bNumEmbMIDIJack = midi->in_ports; - for (n = 0; n < midi->in_ports; n++) - ms_out_desc.baAssocJackID[n] = jack_in_emb_desc.bJackID; ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); ms_in_desc.bNumEmbMIDIJack = midi->out_ports; - for (n = 0; n < midi->out_ports; n++) - ms_in_desc.baAssocJackID[n] = jack_out_emb_desc.bJackID; /* ... 
and add them to the list */ midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; @@ -901,6 +891,8 @@ f_midi_bind(struct usb_configuration *c, struct usb_function *f) f->descriptors = usb_copy_descriptors(midi_function); } + kfree(midi_function); + return 0; fail: diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 349077033338..16a509ae517b 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -346,7 +346,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - skb->len == 0, req->actual); + skb->len <= 1, req->actual); page = NULL; if (req->actual < req->length) { /* Last fragment */ diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c index 91fdf790ed20..cf33a8d0fd5d 100644 --- a/drivers/usb/gadget/f_serial.c +++ b/drivers/usb/gadget/f_serial.c @@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } if (!gser->port.in->desc || !gser->port.out->desc) { DBG(cdev, "activate generic ttyGS%d\n", gser->port_num); - if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) || - !config_ep_by_speed(cdev->gadget, f, gser->port.out)) { + if (config_ep_by_speed(cdev->gadget, f, gser->port.in) || + config_ep_by_speed(cdev->gadget, f, gser->port.out)) { gser->port.in->desc = NULL; gser->port.out->desc = NULL; return -EINVAL; diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index f7e39b0365ce..11b5196284ae 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c @@ -859,7 +859,7 @@ static int class_setup_req(struct fsg_dev *fsg, if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; - if (w_index != 0 || w_value != 0) { + if (w_index != 0 || w_value != 0 || w_length != 0) { value = -EDOM; break; } @@ -875,7 +875,7 @@ static int class_setup_req(struct fsg_dev *fsg, if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) break; - if (w_index != 0 || w_value != 0) { + if (w_index != 0 || w_value != 0 || w_length != 1) { value = -EDOM; break; } diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c index 43a49ecc1f36..dcbc0a2e48dd 100644 --- a/drivers/usb/gadget/fsl_mxc_udc.c +++ b/drivers/usb/gadget/fsl_mxc_udc.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/fsl_devices.h> #include <linux/platform_device.h> +#include <linux/io.h> #include <mach/hardware.h> @@ -88,7 +89,6 @@ eenahb: void fsl_udc_clk_finalize(struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data; -#if defined(CONFIG_SOC_IMX35) if (cpu_is_mx35()) { unsigned int v; @@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev) USBPHYCTRL_OTGBASE_OFFSET)); } } -#endif /* ULPI transceivers don't need usbpll */ if (pdata->phy_mode == FSL_USB2_PHY_ULPI) { diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 2a03e4de11c1..e00cf92409ce 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver, if (!udc_controller) return -ENODEV; - if (!driver || (driver->speed != USB_SPEED_FULL - && driver->speed != USB_SPEED_HIGH) + if (!driver || driver->speed < USB_SPEED_FULL || !bind || !driver->disconnect || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/fsl_udc_core.c 
b/drivers/usb/gadget/fsl_udc_core.c index b2c44e1d5813..dd28ef3def71 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req) kfree(req); } -/*-------------------------------------------------------------------------*/ +/* Actually add a dTD chain to an empty dQH and let go */ +static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td) +{ + struct ep_queue_head *qh = get_qh_by_ep(ep); + + /* Write dQH next pointer and terminate bit to 0 */ + qh->next_dtd_ptr = cpu_to_hc32(td->td_dma + & EP_QUEUE_HEAD_NEXT_POINTER_MASK); + + /* Clear active and halt bit */ + qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE + | EP_QUEUE_HEAD_STATUS_HALT)); + + /* Ensure that updates to the QH will occur before priming. */ + wmb(); + + /* Prime endpoint by writing correct bit to ENDPTPRIME */ + fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16)) + : (1 << (ep_index(ep))), &dr_regs->endpointprime); +} + +/* Add dTD chain to the dQH of an EP */ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) { - int i = ep_index(ep) * 2 + ep_is_in(ep); u32 temp, bitmask, tmp_stat; - struct ep_queue_head *dQH = &ep->udc->ep_qh[i]; /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr); VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */ @@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK); /* Read prime bit, if 1 goto done */ if (fsl_readl(&dr_regs->endpointprime) & bitmask) - goto out; + return; do { /* Set ATDTW bit in USBCMD */ @@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd); if (tmp_stat) - goto out; + return; } - /* Write dQH next pointer and terminate bit to 0 */ - temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; - dQH->next_dtd_ptr = cpu_to_hc32(temp); - - /* Clear active and halt bit */ - temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE - | EP_QUEUE_HEAD_STATUS_HALT)); - dQH->size_ioc_int_sts &= temp; - - /* Ensure that updates to the QH will occur before priming. */ - wmb(); - - /* Prime endpoint by writing 1 to ENDPTPRIME */ - temp = ep_is_in(ep) - ? 
(1 << (ep_index(ep) + 16)) - : (1 << (ep_index(ep))); - fsl_writel(temp, &dr_regs->endpointprime); -out: - return; + fsl_prime_ep(ep, req->head); } /* Fill in the dTD structure @@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) VDBG("%s, bad ep", __func__); return -EINVAL; } - if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { + if (usb_endpoint_xfer_isoc(ep->desc)) { if (req->req.length > ep->ep.maxpacket) return -EMSGSIZE; } @@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) /* The request isn't the last request in this ep queue */ if (req->queue.next != &ep->queue) { - struct ep_queue_head *qh; struct fsl_req *next_req; - qh = ep->qh; next_req = list_entry(req->queue.next, struct fsl_req, queue); - /* Point the QH to the first TD of next request */ - fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr); + /* prime with dTD of next request */ + fsl_prime_ep(ep, next_req->head); } - - /* The request hasn't been processed, patch up the TD chain */ + /* The request hasn't been processed, patch up the TD chain */ } else { struct fsl_req *prev_req; prev_req = list_entry(req->queue.prev, struct fsl_req, queue); - fsl_writel(fsl_readl(&req->tail->next_td_ptr), - &prev_req->tail->next_td_ptr); - + prev_req->tail->next_td_ptr = req->tail->next_td_ptr; } done(ep, req, -ECONNRESET); @@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value) goto out; } - if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { + if (usb_endpoint_xfer_isoc(ep->desc)) { status = -EOPNOTSUPP; goto out; } @@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep) struct fsl_udc *udc; int size = 0; u32 bitmask; - struct ep_queue_head *d_qh; + struct ep_queue_head *qh; ep = container_of(_ep, struct fsl_ep, ep); if (!_ep || (!ep->desc && ep_index(ep) != 0)) @@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep) if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; - d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)]; + qh = get_qh_by_ep(ep); bitmask = (ep_is_in(ep)) ? 
(1 << (ep_index(ep) + 16)) : (1 << (ep_index(ep))); if (fsl_readl(&dr_regs->endptstatus) & bitmask) - size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE) + size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE) >> DTD_LENGTH_BIT_POS; pr_debug("%s %u\n", __func__, size); @@ -1717,7 +1713,7 @@ static void dtd_complete_irq(struct fsl_udc *udc) static inline enum usb_device_speed portscx_device_speed(u32 reg) { - switch (speed & PORTSCX_PORT_SPEED_MASK) { + switch (reg & PORTSCX_PORT_SPEED_MASK) { case PORTSCX_PORT_SPEED_HIGH: return USB_SPEED_HIGH; case PORTSCX_PORT_SPEED_FULL: @@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver, if (!udc_controller) return -ENODEV; - if (!driver || (driver->speed != USB_SPEED_FULL - && driver->speed != USB_SPEED_HIGH) + if (!driver || driver->speed < USB_SPEED_FULL || !bind || !driver->disconnect || !driver->setup) return -EINVAL; @@ -2480,8 +2475,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev) #ifndef CONFIG_ARCH_MXC if (pdata->have_sysif_regs) - usb_sys_regs = (struct usb_sys_interface *) - ((u32)dr_regs + USB_DR_SYS_OFFSET); + usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET; #endif /* Initialize USB clocks */ diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h index 1d51be83fda8..f781f5dec417 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.h +++ b/drivers/usb/gadget/fsl_usb2_udc.h @@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length) * 2 + ((windex & USB_DIR_IN) ? 1 : 0)) #define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP)) +static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep) +{ + /* we only have one ep0 structure but two queue heads */ + if (ep_index(ep) != 0) + return ep->qh; + else + return &ep->udc->ep_qh[(ep->udc->ep0_dir == + USB_DIR_IN) ? 
1 : 0]; +} + struct platform_device; #ifdef CONFIG_ARCH_MXC int fsl_udc_clk_init(struct platform_device *pdev); diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index a392ec0d2d51..6ccae2707e59 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1730,8 +1730,9 @@ static void gadgetfs_disconnect (struct usb_gadget *gadget) { struct dev_data *dev = get_gadget_data (gadget); + unsigned long flags; - spin_lock (&dev->lock); + spin_lock_irqsave (&dev->lock, flags); if (dev->state == STATE_DEV_UNCONNECTED) goto exit; dev->state = STATE_DEV_UNCONNECTED; @@ -1740,7 +1741,7 @@ gadgetfs_disconnect (struct usb_gadget *gadget) next_event (dev, GADGETFS_DISCONNECT); ep0_readable (dev); exit: - spin_unlock (&dev->lock); + spin_unlock_irqrestore (&dev->lock, flags); } static void diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 91d0af2a24a8..9aa1cbbee45b 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver, int retval; if (!driver - || driver->speed != USB_SPEED_HIGH + || driver->speed < USB_SPEED_HIGH || !bind || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 7f1bc9a73cda..da2b9d0be3ca 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget, * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE) * "must not be used in normal operation" */ - if (!driver || driver->speed != USB_SPEED_HIGH + if (!driver || driver->speed < USB_SPEED_HIGH || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c index 550d6dcdf104..5048a0c07640 100644 --- a/drivers/usb/gadget/pch_udc.c +++ b/drivers/usb/gadget/pch_udc.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. + * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -354,6 +354,7 @@ struct pch_udc_dev { #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 #define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D +#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808 static const char ep0_string[] = "ep0in"; static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ @@ -2970,6 +2971,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = { .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, .class_mask = 0xffffffff, }, + { + PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC), + .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, + .class_mask = 0xffffffff, + }, { 0 }, }; @@ -2999,5 +3005,5 @@ static void __exit pch_udc_pci_exit(void) module_exit(pch_udc_pci_exit); MODULE_DESCRIPTION("Intel EG20T USB Device Controller"); -MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); +MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 68a826a1b866..fc719a3f8557 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -1718,6 +1718,8 @@ static void r8a66597_fifo_flush(struct usb_ep *_ep) if (list_empty(&ep->queue) && !ep->busy) { pipe_stop(ep->r8a66597, ep->pipenum); r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr); + r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr); + r8a66597_write(ep->r8a66597, 0, ep->pipectr); } spin_unlock_irqrestore(&ep->r8a66597->lock, flags); } @@ -1742,26 +1744,16 @@ static int r8a66597_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget); - int retval; if (!driver - || driver->speed != USB_SPEED_HIGH + || driver->speed < USB_SPEED_HIGH || !driver->setup) return -EINVAL; if (!r8a66597) return -ENODEV; /* hook up the driver */ - driver->driver.bus = NULL; r8a66597->driver = driver; - r8a66597->gadget.dev.driver = &driver->driver; - - retval = device_add(&r8a66597->gadget.dev); - if (retval) { - dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n", - retval); - goto error; - } init_controller(r8a66597); r8a66597_bset(r8a66597, VBSE, INTENB0); @@ -1775,12 +1767,6 @@ static int r8a66597_start(struct usb_gadget *gadget, } return 0; - -error: - r8a66597->driver = NULL; - r8a66597->gadget.dev.driver = NULL; - - return retval; } static int r8a66597_stop(struct usb_gadget *gadget, @@ -1794,7 +1780,6 @@ static int r8a66597_stop(struct usb_gadget *gadget, disable_controller(r8a66597); spin_unlock_irqrestore(&r8a66597->lock, flags); - device_del(&r8a66597->gadget.dev); r8a66597->driver = NULL; return 0; } @@ -1845,6 +1830,7 @@ static int __exit r8a66597_remove(struct platform_device *pdev) clk_put(r8a66597->clk); } #endif + device_unregister(&r8a66597->gadget.dev); kfree(r8a66597); return 0; } @@ -1924,13 +1910,17 @@ static int __init r8a66597_probe(struct platform_device *pdev) r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; r8a66597->gadget.ops = &r8a66597_gadget_ops; - device_initialize(&r8a66597->gadget.dev); dev_set_name(&r8a66597->gadget.dev, "gadget"); r8a66597->gadget.is_dualspeed = 1; r8a66597->gadget.dev.parent = &pdev->dev; r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask; r8a66597->gadget.dev.release = pdev->dev.release; r8a66597->gadget.name = udc_name; + ret = device_register(&r8a66597->gadget.dev); + if (ret < 0) { 
+ dev_err(&pdev->dev, "device_register failed\n"); + goto clean_up; + } init_timer(&r8a66597->timer); r8a66597->timer.function = r8a66597_timer; @@ -1945,7 +1935,7 @@ static int __init r8a66597_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); ret = PTR_ERR(r8a66597->clk); - goto clean_up; + goto clean_up_dev; } clk_enable(r8a66597->clk); } @@ -2014,7 +2004,9 @@ clean_up2: clk_disable(r8a66597->clk); clk_put(r8a66597->clk); } +clean_up_dev: #endif + device_unregister(&r8a66597->gadget.dev); clean_up: if (r8a66597) { if (r8a66597->sudmac_reg) diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index a552453dc946..b31448229f0b 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver, return -EINVAL; } - if (driver->speed != USB_SPEED_HIGH && - driver->speed != USB_SPEED_FULL) { + if (driver->speed < USB_SPEED_FULL) dev_err(hsotg->dev, "%s: bad speed\n", __func__); - } if (!bind || !driver->setup) { dev_err(hsotg->dev, "%s: missing entry points\n", __func__); diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c index 8d54f893cefe..20a553b46aed 100644 --- a/drivers/usb/gadget/s3c-hsudc.c +++ b/drivers/usb/gadget/s3c-hsudc.c @@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver, int ret; if (!driver - || (driver->speed != USB_SPEED_FULL && - driver->speed != USB_SPEED_HIGH) + || driver->speed < USB_SPEED_FULL || !bind || !driver->unbind || !driver->disconnect || !driver->setup) return -EINVAL; diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 022baeca7c94..6939e17f4580 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c @@ -210,10 +210,10 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); if (udc_is_newstyle(udc)) { - usb_gadget_disconnect(udc->gadget); + udc->driver->disconnect(udc->gadget); udc->driver->unbind(udc->gadget); usb_gadget_udc_stop(udc->gadget, udc->driver); - + usb_gadget_disconnect(udc->gadget); } else { usb_gadget_stop(udc->gadget, udc->driver); } @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver); static ssize_t usb_udc_srp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { - struct usb_udc *udc = dev_get_drvdata(dev); + struct usb_udc *udc = container_of(dev, struct usb_udc, dev); if (sysfs_streq(buf, "1")) usb_gadget_wakeup(udc->gadget); @@ -378,7 +378,7 @@ static ssize_t usb_udc_speed_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", usb_speed_string(udc->gadget->speed)); } -static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL); +static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL); #define USB_UDC_ATTR(name) \ ssize_t usb_udc_##name##_show(struct device *dev, \ @@ -389,7 +389,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \ \ return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ } \ -static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL) +static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL) static USB_UDC_ATTR(is_dualspeed); static USB_UDC_ATTR(is_otg); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 2e829fae6482..a60679cbbf85 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1475,30 +1475,36 @@ iso_stream_schedule ( * jump until after the queue is primed. 
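The r8a66597 hunks above move registration of the gadget's struct device from the start/stop callbacks into probe/remove, with a clean_up_dev label that unregisters it if a later probe step fails. A rough sketch of that probe-time shape; example_enable_clock() and the field handling are stand-ins, not the driver's real code:

#include <linux/device.h>

static int example_enable_clock(void)
{
	return 0;	/* stand-in for clk_get()/clk_enable() in the real probe */
}

static int example_probe(struct device *parent, struct device *gadget_dev)
{
	int ret;

	dev_set_name(gadget_dev, "gadget");
	gadget_dev->parent = parent;

	/* device_register() replaces the old device_initialize() + device_add() split */
	ret = device_register(gadget_dev);
	if (ret < 0)
		return ret;

	ret = example_enable_clock();
	if (ret < 0)
		goto err_unregister;

	return 0;

err_unregister:
	device_unregister(gadget_dev);	/* undo the registration on a later failure */
	return ret;
}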
*/ else { + int done = 0; start = SCHEDULE_SLOP + (now & ~0x07); /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ - /* find a uframe slot with enough bandwidth */ - next = start + period; - for (; start < next; start++) { - + /* find a uframe slot with enough bandwidth. + * Early uframes are more precious because full-speed + * iso IN transfers can't use late uframes, + * and therefore they should be allocated last. + */ + next = start; + start += period; + do { + start--; /* check schedule: enough space? */ if (stream->highspeed) { if (itd_slot_ok(ehci, mod, start, stream->usecs, period)) - break; + done = 1; } else { if ((start % 8) >= 6) continue; if (sitd_slot_ok(ehci, mod, stream, start, sched, period)) - break; + done = 1; } - } + } while (start > next && !done); /* no room in the schedule */ - if (start == next) { + if (!done) { ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n", urb, now, now + mod); status = -ENOSPC; diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c index fe74bd676018..b4fb511d24bc 100644 --- a/drivers/usb/host/ehci-xls.c +++ b/drivers/usb/host/ehci-xls.c @@ -19,7 +19,7 @@ static int ehci_xls_setup(struct usb_hcd *hcd) ehci->caps = hcd->regs; ehci->regs = hcd->regs + - HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); dbg_hcs_params(ehci, "reset"); dbg_hcc_params(ehci, "reset"); diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index ba3a46b78b75..95a9fec38e89 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -223,6 +223,9 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int if (port < 0 || port >= 2) return; + if (pdata->vbus_pin[port] <= 0) + return; + gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable); } @@ -231,6 +234,9 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) if (port < 0 || port >= 2) return -EINVAL; + if (pdata->vbus_pin[port] <= 0) + return -EINVAL; + return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted; } diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 34efd479e068..b2639191549e 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd) struct ohci_hcd *ohci; ohci = hcd_to_ohci (hcd); - ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); - ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); + ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable); - /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ - ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? 
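The iso_stream_schedule() rework above walks the candidate window from the latest uframe backwards, so the early uframes, the only ones usable by full-speed iso IN transfers, are handed out last. The search order in isolation, with slot_fits() standing in for itd_slot_ok()/sitd_slot_ok():

/*
 * Test candidates [base, base + period) from the end toward the start and
 * return the latest slot that fits, or -1 if none does (the caller maps
 * that to -ENOSPC).
 */
static int example_find_latest_slot(int base, int period,
				    int (*slot_fits)(int slot))
{
	int slot = base + period;

	do {
		slot--;
		if (slot_fits(slot))
			return slot;
	} while (slot > base);

	return -1;
}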
- OHCI_CTRL_RWC | OHCI_CTRL_HCFS : - OHCI_CTRL_RWC); - ohci_writel(ohci, ohci->hc_control, &ohci->regs->control); + /* Software reset, after which the controller goes into SUSPEND */ + ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus); + ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */ + udelay(10); - /* flush the writes */ - (void) ohci_readl (ohci, &ohci->regs->control); + ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval); } static int check_ed(struct ohci_hcd *ohci, struct ed *ed) diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index ad8166c681e2..bc01b064585a 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } -/* nVidia controllers continue to drive Reset signalling on the bus - * even after system shutdown, wasting power. This flag tells the - * shutdown routine to leave the controller OPERATIONAL instead of RESET. - */ -static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) -{ - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - struct ohci_hcd *ohci = hcd_to_ohci(hcd); - - /* Evidently nVidia fixed their later hardware; this is a guess at - * the changeover point. - */ -#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d - - if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) { - ohci->flags |= OHCI_QUIRK_SHUTDOWN; - ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); - } - - return 0; -} - static void sb800_prefetch(struct ohci_hcd *ohci, int on) { struct pci_dev *pdev; @@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = { PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), .driver_data = (unsigned long)ohci_quirk_amd700, }, - { - PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), - .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown, - }, /* FIXME for some of the early AMD 760 southbridges, OHCI * won't work at all. blacklist them. 
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 35e5fd640ce7..0795b934d00c 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -403,7 +403,6 @@ struct ohci_hcd { #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ -#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */ // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 27a3dec32fa2..caf87428ca43 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -37,6 +37,7 @@ #define OHCI_INTRENABLE 0x10 #define OHCI_INTRDISABLE 0x14 #define OHCI_FMINTERVAL 0x34 +#define OHCI_HCFS (3 << 6) /* hc functional state */ #define OHCI_HCR (1 << 0) /* host controller reset */ #define OHCI_OCR (1 << 3) /* ownership change request */ #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ @@ -466,6 +467,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) { void __iomem *base; u32 control; + u32 fminterval; + int cnt; if (!mmio_resource_enabled(pdev, 0)) return; @@ -498,41 +501,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) } #endif - /* reset controller, preserving RWC (and possibly IR) */ - writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); - readl(base + OHCI_CONTROL); + /* disable interrupts */ + writel((u32) ~0, base + OHCI_INTRDISABLE); - /* Some NVIDIA controllers stop working if kept in RESET for too long */ - if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { - u32 fminterval; - int cnt; + /* Reset the USB bus, if the controller isn't already in RESET */ + if (control & OHCI_HCFS) { + /* Go into RESET, preserving RWC (and possibly IR) */ + writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); + readl(base + OHCI_CONTROL); - /* drive reset for at least 50 ms (7.1.7.5) */ + /* drive bus reset for at least 50 ms (7.1.7.5) */ msleep(50); + } - /* software reset of the controller, preserving HcFmInterval */ - fminterval = readl(base + OHCI_FMINTERVAL); - writel(OHCI_HCR, base + OHCI_CMDSTATUS); + /* software reset of the controller, preserving HcFmInterval */ + fminterval = readl(base + OHCI_FMINTERVAL); + writel(OHCI_HCR, base + OHCI_CMDSTATUS); - /* reset requires max 10 us delay */ - for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ - if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) - break; - udelay(1); - } - writel(fminterval, base + OHCI_FMINTERVAL); - - /* Now we're in the SUSPEND state with all devices reset - * and wakeups and interrupts disabled - */ + /* reset requires max 10 us delay */ + for (cnt = 30; cnt > 0; --cnt) { /* ... 
allow extra time */ + if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) + break; + udelay(1); } + writel(fminterval, base + OHCI_FMINTERVAL); - /* - * disable interrupts - */ - writel(~(u32)0, base + OHCI_INTRDISABLE); - writel(~(u32)0, base + OHCI_INTRSTATUS); - + /* Now the controller is safely in SUSPEND and nothing can wake it up */ iounmap(base); } @@ -627,7 +621,7 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) void __iomem *base, *op_reg_base; u32 hcc_params, cap, val; u8 offset, cap_length; - int wait_time, delta, count = 256/4; + int wait_time, count = 256/4; if (!mmio_resource_enabled(pdev, 0)) return; @@ -673,11 +667,10 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev) writel(val, op_reg_base + EHCI_USBCMD); wait_time = 2000; - delta = 100; do { writel(0x3f, op_reg_base + EHCI_USBSTS); - udelay(delta); - wait_time -= delta; + udelay(100); + wait_time -= 100; val = readl(op_reg_base + EHCI_USBSTS); if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { break; diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index d6e175428618..a403b53e86b9 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset) { qset->td_start = qset->td_end = qset->ntds = 0; - qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T); + qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; qset->qh.err_count = 0; qset->qh.scratch[0] = 0; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 42a22b8e6922..0e4b25fa3bcd 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -982,7 +982,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud struct xhci_virt_device *dev; struct xhci_ep_ctx *ep0_ctx; struct xhci_slot_ctx *slot_ctx; - struct xhci_input_control_ctx *ctrl_ctx; u32 port_num; struct usb_device *top_dev; @@ -994,12 +993,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return -EINVAL; } ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); - ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); - /* 2) New slot context and endpoint 0 context are valid*/ - ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); - /* 3) Only the control endpoint is valid - one endpoint context */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); switch (udev->speed) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 940321b3ec68..9f1d4b15d818 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -816,23 +816,24 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) struct xhci_ring *ring; struct xhci_td *cur_td; int ret, i, j; + unsigned long flags; ep = (struct xhci_virt_ep *) arg; xhci = ep->xhci; - spin_lock(&xhci->lock); + spin_lock_irqsave(&xhci->lock, flags); ep->stop_cmds_pending--; if (xhci->xhc_state & XHCI_STATE_DYING) { xhci_dbg(xhci, "Stop EP timer ran, but another timer marked " "xHCI as DYING, exiting.\n"); - spin_unlock(&xhci->lock); + spin_unlock_irqrestore(&xhci->lock, flags); return; } if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { xhci_dbg(xhci, "Stop EP timer ran, but no command pending, " "exiting.\n"); - spin_unlock(&xhci->lock); + spin_unlock_irqrestore(&xhci->lock, flags); return; } @@ -844,11 +845,11 @@ void 
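quirk_usb_handoff_ohci() above waits for the self-clearing OHCI_HCR reset bit with a short bounded polling loop instead of an open-ended wait. A stand-alone sketch of that pattern; the roughly 30 x 1 us budget follows the hunk, the HcCommandStatus offset is from the OHCI spec, and the rest is illustrative:

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

#define EX_OHCI_CMDSTATUS	0x08
#define EX_OHCI_HCR		(1 << 0)	/* host controller reset, self-clearing */

/* The spec allows 10 us for the reset; ~30 polls of 1 us leave extra slack. */
static bool example_ohci_reset_done(void __iomem *base)
{
	int cnt;

	for (cnt = 30; cnt > 0; --cnt) {
		if (!(readl(base + EX_OHCI_CMDSTATUS) & EX_OHCI_HCR))
			return true;
		udelay(1);
	}
	return false;
}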
xhci_stop_endpoint_command_watchdog(unsigned long arg) xhci->xhc_state |= XHCI_STATE_DYING; /* Disable interrupts from the host controller and start halting it */ xhci_quiesce(xhci); - spin_unlock(&xhci->lock); + spin_unlock_irqrestore(&xhci->lock, flags); ret = xhci_halt(xhci); - spin_lock(&xhci->lock); + spin_lock_irqsave(&xhci->lock, flags); if (ret < 0) { /* This is bad; the host is not responding to commands and it's * not allowing itself to be halted. At least interrupts are @@ -896,7 +897,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) } } } - spin_unlock(&xhci->lock); + spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "Calling usb_hc_died()\n"); usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); xhci_dbg(xhci, "xHCI host controller is dead.\n"); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1ff95a0df576..a1afb7c39f7e 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) ring = xhci->cmd_ring; seg = ring->deq_seg; do { - memset(seg->trbs, 0, SEGMENT_SIZE); + memset(seg->trbs, 0, + sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= + cpu_to_le32(~TRB_CYCLE); seg = seg->next; } while (seg != ring->deq_seg); @@ -799,7 +802,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; - int retval; + int retval = 0; /* Wait a bit if either of the roothubs need to settle from the * transition into bus suspend. @@ -809,6 +812,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci->bus_state[1].next_statechange)) msleep(100); + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); + spin_lock_irq(&xhci->lock); if (xhci->quirks & XHCI_RESET_ON_RESUME) hibernated = true; @@ -878,20 +884,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) return retval; xhci_dbg(xhci, "Start the primary HCD\n"); retval = xhci_run(hcd->primary_hcd); - if (retval) - goto failed_restart; - - xhci_dbg(xhci, "Start the secondary HCD\n"); - retval = xhci_run(secondary_hcd); if (!retval) { - set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - set_bit(HCD_FLAG_HW_ACCESSIBLE, - &xhci->shared_hcd->flags); + xhci_dbg(xhci, "Start the secondary HCD\n"); + retval = xhci_run(secondary_hcd); } -failed_restart: hcd->state = HC_STATE_SUSPENDED; xhci->shared_hcd->state = HC_STATE_SUSPENDED; - return retval; + goto done; } /* step 4: set Run/Stop bit */ @@ -910,11 +909,14 @@ failed_restart: * Running endpoints by ringing their doorbells */ - set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); - spin_unlock_irq(&xhci->lock); - return 0; + + done: + if (retval == 0) { + usb_hcd_resume_root_hub(hcd); + usb_hcd_resume_root_hub(xhci->shared_hcd); + } + return retval; } #endif /* CONFIG_PM */ @@ -3504,6 +3506,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) /* Otherwise, update the control endpoint ring enqueue pointer. 
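The xhci-ring hunk above converts the stop-endpoint watchdog from spin_lock() to spin_lock_irqsave(), because a timer callback cannot assume a fixed interrupt state while the same lock is also taken from interrupt context. The shape of that change reduced to a minimal sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_pending_cmds;

/*
 * A timer/watchdog callback sharing example_lock with an interrupt handler
 * must save and restore the interrupt state itself.
 */
static void example_watchdog(unsigned long arg)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_pending_cmds--;
	if (example_pending_cmds) {
		spin_unlock_irqrestore(&example_lock, flags);
		return;
	}
	/* ... handle the timeout with the lock held ... */
	spin_unlock_irqrestore(&example_lock, flags);
}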
*/ else xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); + ctrl_ctx->drop_flags = 0; + xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); @@ -3585,7 +3591,6 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) + 1; /* Zero the input context control for later use */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); ctrl_ctx->add_flags = 0; ctrl_ctx->drop_flags = 0; diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index fc34b8b11910..07a03460a598 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -11,6 +11,7 @@ config USB_MUSB_HDRC select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS + select USB_GADGET_DUALSPEED tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' help Say Y here if your system has a dual role high speed USB @@ -60,7 +61,7 @@ config USB_MUSB_BLACKFIN config USB_MUSB_UX500 tristate "U8500 and U5500" - depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500) + depends on (ARCH_U8500 && AB8500_USB) endchoice diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c index 08f1d0b662a3..e233d2b7d335 100644 --- a/drivers/usb/musb/am35x.c +++ b/drivers/usb/musb/am35x.c @@ -27,6 +27,7 @@ */ #include <linux/init.h> +#include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/platform_device.h> diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 4da7492ddbdb..2613bfdb09b6 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c @@ -27,6 +27,7 @@ */ #include <linux/init.h> +#include <linux/module.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/platform_device.h> diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 20a28731c338..b63ab1570103 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1477,8 +1477,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) /*-------------------------------------------------------------------------*/ #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ - defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ - defined(CONFIG_ARCH_U5500) + defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) static irqreturn_t generic_interrupt(int irq, void *__hci) { @@ -2302,18 +2301,12 @@ static int musb_suspend(struct device *dev) */ } - musb_save_context(musb); - spin_unlock_irqrestore(&musb->lock, flags); return 0; } static int musb_resume_noirq(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - - musb_restore_context(musb); - /* for static cmos like DaVinci, register values were preserved * unless for some reason the whole soc powered down or the USB * module got reset through the PSC (vs just being disabled). 
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index ae4a20acef6c..922148ff8d29 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g, unsigned long flags; int retval = -EINVAL; - if (driver->speed != USB_SPEED_HIGH) + if (driver->speed < USB_SPEED_HIGH) goto err0; pm_runtime_get_sync(musb->controller); @@ -1999,10 +1999,6 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) nuke(&hw_ep->ep_out, -ESHUTDOWN); } } - - spin_unlock(&musb->lock); - driver->disconnect(&musb->g); - spin_lock(&musb->lock); } } diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index d2e2efaba658..08c679c0dde5 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -405,7 +405,7 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev) /* * platform functions */ -static int __devinit usbhs_probe(struct platform_device *pdev) +static int usbhs_probe(struct platform_device *pdev) { struct renesas_usbhs_platform_info *info = pdev->dev.platform_data; struct renesas_usbhs_driver_callback *dfunc; diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 8da685e796d1..ffdf5d15085e 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -820,7 +820,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) if (len % 4) /* 32bit alignment */ goto usbhsf_pio_prepare_push; - if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ + if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ goto usbhsf_pio_prepare_push; /* get enable DMA fifo */ @@ -897,7 +897,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done) if (!fifo) goto usbhsf_pio_prepare_pop; - if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ + if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ goto usbhsf_pio_prepare_pop; ret = usbhsf_fifo_select(pipe, fifo, 0); diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h index 8ae3733031cd..6c6875533f01 100644 --- a/drivers/usb/renesas_usbhs/mod.h +++ b/drivers/usb/renesas_usbhs/mod.h @@ -143,8 +143,8 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod); */ #if defined(CONFIG_USB_RENESAS_USBHS_HCD) || \ defined(CONFIG_USB_RENESAS_USBHS_HCD_MODULE) -extern int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv); -extern int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv); +extern int usbhs_mod_host_probe(struct usbhs_priv *priv); +extern int usbhs_mod_host_remove(struct usbhs_priv *priv); #else static inline int usbhs_mod_host_probe(struct usbhs_priv *priv) { @@ -157,8 +157,8 @@ static inline void usbhs_mod_host_remove(struct usbhs_priv *priv) #if defined(CONFIG_USB_RENESAS_USBHS_UDC) || \ defined(CONFIG_USB_RENESAS_USBHS_UDC_MODULE) -extern int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv); -extern void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv); +extern int usbhs_mod_gadget_probe(struct usbhs_priv *priv); +extern void usbhs_mod_gadget_remove(struct usbhs_priv *priv); #else static inline int usbhs_mod_gadget_probe(struct usbhs_priv *priv) { diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 4cc7ee0babc6..7f4e80338570 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ 
b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); - struct usbhs_priv *priv; - struct device *dev; - int ret; + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->setup || - driver->speed != USB_SPEED_HIGH) + driver->speed < USB_SPEED_FULL) return -EINVAL; - dev = usbhsg_gpriv_to_dev(gpriv); - priv = usbhsg_gpriv_to_priv(gpriv); - /* first hook up the driver ... */ gpriv->driver = driver; gpriv->gadget.dev.driver = &driver->driver; - ret = device_add(&gpriv->gadget.dev); - if (ret) { - dev_err(dev, "device_add error %d\n", ret); - goto add_fail; - } - return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD); - -add_fail: - gpriv->driver = NULL; - gpriv->gadget.dev.driver = NULL; - - return ret; } static int usbhsg_gadget_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); - struct usbhs_priv *priv; - struct device *dev; + struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->unbind) return -EINVAL; - dev = usbhsg_gpriv_to_dev(gpriv); - priv = usbhsg_gpriv_to_priv(gpriv); - usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); - device_del(&gpriv->gadget.dev); + gpriv->gadget.dev.driver = NULL; gpriv->driver = NULL; return 0; @@ -827,10 +806,17 @@ static int usbhsg_start(struct usbhs_priv *priv) static int usbhsg_stop(struct usbhs_priv *priv) { + struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); + + /* cable disconnect */ + if (gpriv->driver && + gpriv->driver->disconnect) + gpriv->driver->disconnect(&gpriv->gadget); + return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); } -int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) +int usbhs_mod_gadget_probe(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv; struct usbhsg_uep *uep; @@ -876,12 +862,14 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) /* * init gadget */ - device_initialize(&gpriv->gadget.dev); dev_set_name(&gpriv->gadget.dev, "gadget"); gpriv->gadget.dev.parent = dev; gpriv->gadget.name = "renesas_usbhs_udc"; gpriv->gadget.ops = &usbhsg_gadget_ops; gpriv->gadget.is_dualspeed = 1; + ret = device_register(&gpriv->gadget.dev); + if (ret < 0) + goto err_add_udc; INIT_LIST_HEAD(&gpriv->gadget.ep_list); @@ -912,12 +900,15 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv) ret = usb_add_gadget_udc(dev, &gpriv->gadget); if (ret) - goto err_add_udc; + goto err_register; dev_info(dev, "gadget probed\n"); return 0; + +err_register: + device_unregister(&gpriv->gadget.dev); err_add_udc: kfree(gpriv->uep); @@ -927,12 +918,14 @@ usbhs_mod_gadget_probe_err_gpriv: return ret; } -void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv) +void usbhs_mod_gadget_remove(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); usb_del_gadget_udc(&gpriv->gadget); + device_unregister(&gpriv->gadget.dev); + usbhsg_controller_unregister(gpriv); kfree(gpriv->uep); diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c index 1a7208a50afc..bade761a1e52 100644 --- a/drivers/usb/renesas_usbhs/mod_host.c +++ b/drivers/usb/renesas_usbhs/mod_host.c @@ -103,7 +103,7 @@ struct usbhsh_hpriv { u32 port_stat; /* USB_PORT_STAT_xxx */ - struct completion *done; + struct completion setup_ack_done; /* see usbhsh_req_alloc/free */ struct list_head ureq_link_active; @@ -355,6 
+355,7 @@ static void usbhsh_device_free(struct usbhsh_hpriv *hpriv, struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, struct usbhsh_device *udev, struct usb_host_endpoint *ep, + int dir_in_req, gfp_t mem_flags) { struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv); @@ -364,27 +365,38 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, struct usbhs_pipe *pipe, *best_pipe; struct device *dev = usbhsh_hcd_to_dev(hcd); struct usb_endpoint_descriptor *desc = &ep->desc; - int type, i; + int type, i, dir_in; unsigned int min_usr; + dir_in_req = !!dir_in_req; + uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags); if (!uep) { dev_err(dev, "usbhsh_ep alloc fail\n"); return NULL; } - type = usb_endpoint_type(desc); + + if (usb_endpoint_xfer_control(desc)) { + best_pipe = usbhsh_hpriv_to_dcp(hpriv); + goto usbhsh_endpoint_alloc_find_pipe; + } /* * find best pipe for endpoint * see * HARDWARE LIMITATION */ + type = usb_endpoint_type(desc); min_usr = ~0; best_pipe = NULL; - usbhs_for_each_pipe_with_dcp(pipe, priv, i) { + usbhs_for_each_pipe(pipe, priv, i) { if (!usbhs_pipe_type_is(pipe, type)) continue; + dir_in = !!usbhs_pipe_is_dir_in(pipe); + if (0 != (dir_in - dir_in_req)) + continue; + info = usbhsh_pipe_info(pipe); if (min_usr > info->usr_cnt) { @@ -398,7 +410,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, kfree(uep); return NULL; } - +usbhsh_endpoint_alloc_find_pipe: /* * init uep */ @@ -423,6 +435,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, * see * DCPMAXP/PIPEMAXP */ + usbhs_pipe_sequence_data0(uep->pipe); usbhs_pipe_config_update(uep->pipe, usbhsh_device_number(hpriv, udev), usb_endpoint_num(desc), @@ -430,7 +443,7 @@ struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv, dev_dbg(dev, "%s [%d-%s](%p)\n", __func__, usbhsh_device_number(hpriv, udev), - usbhs_pipe_name(pipe), uep); + usbhs_pipe_name(uep->pipe), uep); return uep; } @@ -549,8 +562,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv, * usbhsh_irq_setup_ack() * usbhsh_irq_setup_err() */ - DECLARE_COMPLETION(done); - hpriv->done = &done; + init_completion(&hpriv->setup_ack_done); /* copy original request */ memcpy(&req, urb->setup_packet, sizeof(struct usb_ctrlrequest)); @@ -572,8 +584,7 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv, /* * wait setup packet ACK */ - wait_for_completion(&done); - hpriv->done = NULL; + wait_for_completion(&hpriv->setup_ack_done); dev_dbg(dev, "%s done\n", __func__); } @@ -724,11 +735,11 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd, struct usbhsh_device *udev, *new_udev = NULL; struct usbhs_pipe *pipe; struct usbhsh_ep *uep; + int is_dir_in = usb_pipein(urb->pipe); int ret; - dev_dbg(dev, "%s (%s)\n", - __func__, usb_pipein(urb->pipe) ? "in" : "out"); + dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? 
"in" : "out"); ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) @@ -751,7 +762,8 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd, */ uep = usbhsh_ep_to_uep(ep); if (!uep) { - uep = usbhsh_endpoint_alloc(hpriv, udev, ep, mem_flags); + uep = usbhsh_endpoint_alloc(hpriv, udev, ep, + is_dir_in, mem_flags); if (!uep) goto usbhsh_urb_enqueue_error_free_device; } @@ -1095,10 +1107,7 @@ static int usbhsh_irq_setup_ack(struct usbhs_priv *priv, dev_dbg(dev, "setup packet OK\n"); - if (unlikely(!hpriv->done)) - dev_err(dev, "setup ack happen without necessary data\n"); - else - complete(hpriv->done); /* see usbhsh_urb_enqueue() */ + complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */ return 0; } @@ -1111,10 +1120,7 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv, dev_dbg(dev, "setup packet Err\n"); - if (unlikely(!hpriv->done)) - dev_err(dev, "setup err happen without necessary data\n"); - else - complete(hpriv->done); /* see usbhsh_urb_enqueue() */ + complete(&hpriv->setup_ack_done); /* see usbhsh_urb_enqueue() */ return 0; } @@ -1221,8 +1227,18 @@ static int usbhsh_stop(struct usbhs_priv *priv) { struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); + struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct device *dev = usbhs_priv_to_dev(priv); + /* + * disable irq callback + */ + mod->irq_attch = NULL; + mod->irq_dtch = NULL; + mod->irq_sack = NULL; + mod->irq_sign = NULL; + usbhs_irq_callback_update(priv, mod); + usb_remove_hcd(hcd); /* disable sys */ @@ -1235,7 +1251,7 @@ static int usbhsh_stop(struct usbhs_priv *priv) return 0; } -int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) +int usbhs_mod_host_probe(struct usbhs_priv *priv) { struct usbhsh_hpriv *hpriv; struct usb_hcd *hcd; @@ -1279,7 +1295,6 @@ int __devinit usbhs_mod_host_probe(struct usbhs_priv *priv) hpriv->mod.stop = usbhsh_stop; hpriv->pipe_info = pipe_info; hpriv->pipe_size = pipe_size; - hpriv->done = NULL; usbhsh_req_list_init(hpriv); usbhsh_port_stat_init(hpriv); @@ -1299,7 +1314,7 @@ usbhs_mod_host_probe_err: return -ENOMEM; } -int __devexit usbhs_mod_host_remove(struct usbhs_priv *priv) +int usbhs_mod_host_remove(struct usbhs_priv *priv) { struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv); struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv); diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 5cdb9d912275..18e875b92e00 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -42,7 +42,7 @@ static int debug; * Version information */ -#define DRIVER_VERSION "v0.6" +#define DRIVER_VERSION "v0.7" #define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>" #define DRIVER_DESC "USB ARK3116 serial/IrDA driver" #define DRIVER_DEV_DESC "ARK3116 RS232/IrDA" @@ -380,10 +380,6 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) goto err_out; } - /* setup termios */ - if (tty) - ark3116_set_termios(tty, port, NULL); - /* remove any data still left: also clears error state */ ark3116_read_reg(serial, UART_RX, buf); @@ -406,6 +402,10 @@ static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) /* enable DMA */ ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT); + /* setup termios */ + if (tty) + ark3116_set_termios(tty, port, NULL); + err_out: kfree(buf); return result; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8fe034d2d3e7..ff3db5d056a5 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ 
b/drivers/usb/serial/ftdi_sio.c @@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), @@ -2104,13 +2105,19 @@ static void ftdi_set_termios(struct tty_struct *tty, cflag = termios->c_cflag; - /* FIXME -For this cut I don't care if the line is really changing or - not - so just do the change regardless - should be able to - compare old_termios and tty->termios */ + if (old_termios->c_cflag == termios->c_cflag + && old_termios->c_ispeed == termios->c_ispeed + && old_termios->c_ospeed == termios->c_ospeed) + goto no_c_cflag_changes; + /* NOTE These routines can get interrupted by ftdi_sio_read_bulk_callback - need to examine what this means - don't see any problems yet */ + if ((old_termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)) == + (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB))) + goto no_data_parity_stop_changes; + /* Set number of data bits, parity, stop bits */ urb_value = 0; @@ -2151,6 +2158,7 @@ static void ftdi_set_termios(struct tty_struct *tty, } /* Now do the baudrate */ +no_data_parity_stop_changes: if ((cflag & CBAUD) == B0) { /* Disable flow control */ if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0), @@ -2178,6 +2186,7 @@ static void ftdi_set_termios(struct tty_struct *tty, /* Set flow control */ /* Note device also supports DTR/CD (ugh) and Xon/Xoff in hardware */ +no_c_cflag_changes: if (cflag & CRTSCTS) { dbg("%s Setting to CRTSCTS flow control", __func__); if (usb_control_msg(dev, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 571fa96b49c7..055b64ef0bba 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -112,6 +112,7 @@ /* Propox devices */ #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 +#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739 /* Lenz LI-USB Computer Interface. 
*/ #define FTDI_LENZ_LIUSB_PID 0xD780 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 89ae1f65e1b1..e3426602dc82 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -156,6 +156,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_K4511 0x14CC #define HUAWEI_PRODUCT_ETS1220 0x1803 #define HUAWEI_PRODUCT_E353 0x1506 +#define HUAWEI_PRODUCT_E173S 0x1C05 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -316,6 +317,9 @@ static void option_instat_callback(struct urb *urb); #define ZTE_PRODUCT_AC8710 0xfff1 #define ZTE_PRODUCT_AC2726 0xfff5 #define ZTE_PRODUCT_AC8710T 0xffff +#define ZTE_PRODUCT_MC2718 0xffe8 +#define ZTE_PRODUCT_AD3812 0xffeb +#define ZTE_PRODUCT_MC2716 0xffed #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -468,6 +472,10 @@ static void option_instat_callback(struct urb *urb); #define YUGA_PRODUCT_CLU528 0x260D #define YUGA_PRODUCT_CLU526 0x260F +/* Viettel products */ +#define VIETTEL_VENDOR_ID 0x2262 +#define VIETTEL_PRODUCT_VT1000 0x0002 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -500,6 +508,18 @@ static const struct option_blacklist_info zte_k3765_z_blacklist = { .reserved = BIT(4), }; +static const struct option_blacklist_info zte_ad3812_z_blacklist = { + .sendsetup = BIT(0) | BIT(1) | BIT(2), +}; + +static const struct option_blacklist_info zte_mc2718_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), +}; + +static const struct option_blacklist_info zte_mc2716_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3), +}; + static const struct option_blacklist_info huawei_cdc12_blacklist = { .reserved = BIT(1) | BIT(2), }; @@ -622,6 +642,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), @@ -640,6 +661,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -726,6 +750,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { 
USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, @@ -1043,6 +1068,12 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ @@ -1141,6 +1172,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, + { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 9083d1e616b4..fc2d66f7f4eb 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -91,7 +91,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, - { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 3d10d7f02072..c38b8c00c06f 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -145,10 +145,6 @@ #define ADLINK_VENDOR_ID 0x0b63 #define ADLINK_ND6530_PRODUCT_ID 0x6530 -/* WinChipHead USB->RS 232 adapter */ -#define WINCHIPHEAD_VENDOR_ID 0x4348 -#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523 - /* SMART USB Serial Adapter */ #define SMART_VENDOR_ID 0x0b8c #define SMART_PRODUCT_ID 0x2303 diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 4dca3ef0668c..9fbe742343c6 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -1762,10 +1762,9 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb) result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1); } else { void *buf; - int offset; + int offset = 0; u16 PhyBlockAddr; u8 PageNum; - u32 result; u16 len, oldphy, newphy; buf = kmalloc(blenByte, GFP_KERNEL); diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 93c1a4d86f51..82dd834709c7 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c @@ -59,7 +59,9 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data 
*us) { - /* Pad the SCSI command with zeros out to 12 bytes + /* + * Pad the SCSI command with zeros out to 12 bytes. If the + * command already is 12 bytes or longer, leave it alone. * * NOTE: This only works because a scsi_cmnd struct field contains * a unsigned char cmnd[16], so we know we have storage available @@ -67,9 +69,6 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us) for (; srb->cmd_len<12; srb->cmd_len++) srb->cmnd[srb->cmd_len] = 0; - /* set command length to 12 bytes */ - srb->cmd_len = 12; - /* send the command to the transport layer */ usb_stor_invoke_transport(srb, us); } diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 3041a974faf3..24caba79d722 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Qinglin Ye <yestyle@gmail.com> */ +UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100, + "Kingston", + "DT 101 G2", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG ), + /* Reported by Francesco Foresti <frafore@tiscali.it> */ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201, "Super Top", diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 55f91d9ab00b..29577bf1f559 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -116,6 +116,7 @@ /* Clock registers available only on Version 2 */ #define LCD_CLK_ENABLE_REG 0x6c #define LCD_CLK_RESET_REG 0x70 +#define LCD_CLK_MAIN_RESET BIT(3) #define LCD_NUM_BUFFERS 2 @@ -244,6 +245,10 @@ static inline void lcd_enable_raster(void) { u32 reg; + /* Bring LCDC out of reset */ + if (lcd_revision == LCD_VERSION_2) + lcdc_write(0, LCD_CLK_RESET_REG); + reg = lcdc_read(LCD_RASTER_CTRL_REG); if (!(reg & LCD_RASTER_ENABLE)) lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); @@ -257,6 +262,10 @@ static inline void lcd_disable_raster(void) reg = lcdc_read(LCD_RASTER_CTRL_REG); if (reg & LCD_RASTER_ENABLE) lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); + + if (lcd_revision == LCD_VERSION_2) + /* Write 1 to reset LCDC */ + lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); } static void lcd_blit(int load_mode, struct da8xx_fb_par *par) @@ -584,8 +593,12 @@ static void lcd_reset(struct da8xx_fb_par *par) lcdc_write(0, LCD_DMA_CTRL_REG); lcdc_write(0, LCD_RASTER_CTRL_REG); - if (lcd_revision == LCD_VERSION_2) + if (lcd_revision == LCD_VERSION_2) { lcdc_write(0, LCD_INT_ENABLE_SET_REG); + /* Write 1 to reset */ + lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); + lcdc_write(0, LCD_CLK_RESET_REG); + } } static void lcd_calc_clk_divider(struct da8xx_fb_par *par) diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index 0ccd7adf47bb..6f61e781f15a 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c @@ -19,6 +19,7 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
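The usb_stor_pad12_command() fix above pads short SCSI command blocks out to 12 bytes but no longer forces cmd_len back to 12 for commands that are already longer, which used to truncate 16-byte CDBs. The padding loop on its own, written against a plain buffer for illustration:

/*
 * Pad a SCSI CDB with zeros up to 12 bytes; commands that are already
 * 12 bytes or longer are left untouched.  Returns the resulting length.
 */
static unsigned int example_pad12(unsigned char cmnd[16], unsigned int cmd_len)
{
	for (; cmd_len < 12; cmd_len++)
		cmnd[cmd_len] = 0;
	return cmd_len;
}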
*/ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/vmalloc.h> diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 3532782551cb..5c81533eacaa 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -1720,12 +1720,11 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane, const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); unsigned long fclk = 0; - if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { - if (width != out_width || height != out_height) - return -EINVAL; - else - return 0; - } + if (width == out_width && height == out_height) + return 0; + + if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) + return -EINVAL; if (out_width < width / maxdownscale || out_width > width * 8) diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 3262f0f1fa35..c56378c555b0 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -269,7 +269,7 @@ static void update_hdmi_timings(struct hdmi_config *cfg, unsigned long hdmi_get_pixel_clock(void) { /* HDMI Pixel Clock in Mhz */ - return hdmi.ip_data.cfg.timings.timings.pixel_clock * 10000; + return hdmi.ip_data.cfg.timings.timings.pixel_clock * 1000; } static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h index 69d882cbe709..c01c1c162726 100644 --- a/drivers/video/via/share.h +++ b/drivers/video/via/share.h @@ -559,8 +559,8 @@ #define M1200X720_R60_VSP POSITIVE /* 1200x900@60 Sync Polarity (DCON) */ -#define M1200X900_R60_HSP NEGATIVE -#define M1200X900_R60_VSP NEGATIVE +#define M1200X900_R60_HSP POSITIVE +#define M1200X900_R60_VSP POSITIVE /* 1280x600@60 Sync Polarity (GTF Mode) */ #define M1280x600_R60_HSP NEGATIVE diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 816ed08e7cf3..1a61939b85fc 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -37,7 +37,7 @@ config VIRTIO_BALLOON config VIRTIO_MMIO tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" - depends on EXPERIMENTAL + depends on HAS_IOMEM && EXPERIMENTAL select VIRTIO select VIRTIO_RING ---help--- diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index acc5e43c373e..7317dc2ec426 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -118,7 +118,7 @@ static void vm_finalize_features(struct virtio_device *vdev) vring_transport_features(vdev); for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { - writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET); + writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); writel(vdev->features[i], vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); } diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 79a31e5b4b68..03d1984bd363 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -169,11 +169,29 @@ static void vp_set_status(struct virtio_device *vdev, u8 status) iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); } +/* wait for pending irq handlers */ +static void vp_synchronize_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) + synchronize_irq(vp_dev->pci_dev->irq); + + for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(vp_dev->msix_entries[i].vector); +} + static void vp_reset(struct virtio_device *vdev) { struct virtio_pci_device 
*vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush out the status write, and flush in device writes, + * including MSi-X interrupts, if any. */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush pending VQ/configuration callbacks. */ + vp_synchronize_vectors(vdev); } /* the notify function used when creating a virt queue */ @@ -594,11 +612,11 @@ static struct virtio_config_ops virtio_pci_config_ops = { static void virtio_pci_release_dev(struct device *_d) { - struct virtio_device *dev = container_of(_d, struct virtio_device, - dev); - struct virtio_pci_device *vp_dev = to_vp_device(dev); - - kfree(vp_dev); + /* + * No need for a release method as we allocate/free + * all devices together with the pci devices. + * Provide an empty one to avoid getting a warning from core. + */ } /* the PCI probing function */ @@ -686,6 +704,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); + kfree(vp_dev); } #ifdef CONFIG_PM diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 6285867a9356..79fd606b7cd5 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -314,13 +314,6 @@ config NUC900_WATCHDOG To compile this driver as a module, choose M here: the module will be called nuc900_wdt. -config ADX_WATCHDOG - tristate "Avionic Design Xanthos watchdog" - depends on ARCH_PXA_ADX - help - Say Y here if you want support for the watchdog timer on Avionic - Design Xanthos boards. - config TS72XX_WATCHDOG tristate "TS-72XX SBC Watchdog" depends on MACH_TS72XX diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 55bd5740e910..fe893e91935b 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -51,7 +51,6 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o -obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c deleted file mode 100644 index af6e6b16475a..000000000000 --- a/drivers/watchdog/adx_wdt.c +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright (C) 2008-2009 Avionic Design GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
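vp_reset() above flushes the status write with a read-back and then calls synchronize_irq() on every vector, so no virtqueue or configuration callback can still be running once the reset returns. A reduced sketch of that ordering; the function and parameter names are placeholders:

#include <linux/io.h>
#include <linux/interrupt.h>

/* Call after writing 0 to the virtio-pci status register. */
static void example_flush_after_reset(void __iomem *status_reg,
				      const unsigned int *irqs, int nr_irqs)
{
	int i;

	/* flush the posted status write and pull in any device writes */
	ioread8(status_reg);

	/* wait for handlers already running on other CPUs to finish */
	for (i = 0; i < nr_irqs; i++)
		synchronize_irq(irqs[i]);
}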
- */ - -#include <linux/fs.h> -#include <linux/gfp.h> -#include <linux/io.h> -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/types.h> -#include <linux/uaccess.h> -#include <linux/watchdog.h> - -#define WATCHDOG_NAME "adx-wdt" - -/* register offsets */ -#define ADX_WDT_CONTROL 0x00 -#define ADX_WDT_CONTROL_ENABLE (1 << 0) -#define ADX_WDT_CONTROL_nRESET (1 << 1) -#define ADX_WDT_TIMEOUT 0x08 - -static struct platform_device *adx_wdt_dev; -static unsigned long driver_open; - -#define WDT_STATE_STOP 0 -#define WDT_STATE_START 1 - -struct adx_wdt { - void __iomem *base; - unsigned long timeout; - unsigned int state; - unsigned int wake; - spinlock_t lock; -}; - -static const struct watchdog_info adx_wdt_info = { - .identity = "Avionic Design Xanthos Watchdog", - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, -}; - -static void adx_wdt_start_locked(struct adx_wdt *wdt) -{ - u32 ctrl; - - ctrl = readl(wdt->base + ADX_WDT_CONTROL); - ctrl |= ADX_WDT_CONTROL_ENABLE; - writel(ctrl, wdt->base + ADX_WDT_CONTROL); - wdt->state = WDT_STATE_START; -} - -static void adx_wdt_start(struct adx_wdt *wdt) -{ - unsigned long flags; - - spin_lock_irqsave(&wdt->lock, flags); - adx_wdt_start_locked(wdt); - spin_unlock_irqrestore(&wdt->lock, flags); -} - -static void adx_wdt_stop_locked(struct adx_wdt *wdt) -{ - u32 ctrl; - - ctrl = readl(wdt->base + ADX_WDT_CONTROL); - ctrl &= ~ADX_WDT_CONTROL_ENABLE; - writel(ctrl, wdt->base + ADX_WDT_CONTROL); - wdt->state = WDT_STATE_STOP; -} - -static void adx_wdt_stop(struct adx_wdt *wdt) -{ - unsigned long flags; - - spin_lock_irqsave(&wdt->lock, flags); - adx_wdt_stop_locked(wdt); - spin_unlock_irqrestore(&wdt->lock, flags); -} - -static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds) -{ - unsigned long timeout = seconds * 1000; - unsigned long flags; - unsigned int state; - - spin_lock_irqsave(&wdt->lock, flags); - state = wdt->state; - adx_wdt_stop_locked(wdt); - writel(timeout, wdt->base + ADX_WDT_TIMEOUT); - - if (state == WDT_STATE_START) - adx_wdt_start_locked(wdt); - - wdt->timeout = timeout; - spin_unlock_irqrestore(&wdt->lock, flags); -} - -static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds) -{ - *seconds = wdt->timeout / 1000; -} - -static void adx_wdt_keepalive(struct adx_wdt *wdt) -{ - unsigned long flags; - - spin_lock_irqsave(&wdt->lock, flags); - writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT); - spin_unlock_irqrestore(&wdt->lock, flags); -} - -static int adx_wdt_open(struct inode *inode, struct file *file) -{ - struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev); - - if (test_and_set_bit(0, &driver_open)) - return -EBUSY; - - file->private_data = wdt; - adx_wdt_set_timeout(wdt, 30); - adx_wdt_start(wdt); - - return nonseekable_open(inode, file); -} - -static int adx_wdt_release(struct inode *inode, struct file *file) -{ - struct adx_wdt *wdt = file->private_data; - - adx_wdt_stop(wdt); - clear_bit(0, &driver_open); - - return 0; -} - -static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct adx_wdt *wdt = file->private_data; - void __user *argp = (void __user *)arg; - unsigned long __user *p = argp; - unsigned long seconds = 0; - unsigned int options; - long ret = -EINVAL; - - switch (cmd) { - case WDIOC_GETSUPPORT: - if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info))) - return -EFAULT; - else - return 0; - - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, p); - - case 
WDIOC_KEEPALIVE: - adx_wdt_keepalive(wdt); - return 0; - - case WDIOC_SETTIMEOUT: - if (get_user(seconds, p)) - return -EFAULT; - - adx_wdt_set_timeout(wdt, seconds); - - /* fallthrough */ - case WDIOC_GETTIMEOUT: - adx_wdt_get_timeout(wdt, &seconds); - return put_user(seconds, p); - - case WDIOC_SETOPTIONS: - if (copy_from_user(&options, argp, sizeof(options))) - return -EFAULT; - - if (options & WDIOS_DISABLECARD) { - adx_wdt_stop(wdt); - ret = 0; - } - - if (options & WDIOS_ENABLECARD) { - adx_wdt_start(wdt); - ret = 0; - } - - return ret; - - default: - break; - } - - return -ENOTTY; -} - -static ssize_t adx_wdt_write(struct file *file, const char __user *data, - size_t len, loff_t *ppos) -{ - struct adx_wdt *wdt = file->private_data; - - if (len) - adx_wdt_keepalive(wdt); - - return len; -} - -static const struct file_operations adx_wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .open = adx_wdt_open, - .release = adx_wdt_release, - .unlocked_ioctl = adx_wdt_ioctl, - .write = adx_wdt_write, -}; - -static struct miscdevice adx_wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &adx_wdt_fops, -}; - -static int __devinit adx_wdt_probe(struct platform_device *pdev) -{ - struct resource *res; - struct adx_wdt *wdt; - int ret = 0; - u32 ctrl; - - wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); - if (!wdt) { - dev_err(&pdev->dev, "cannot allocate WDT structure\n"); - return -ENOMEM; - } - - spin_lock_init(&wdt->lock); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "cannot obtain I/O memory region\n"); - return -ENXIO; - } - - res = devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), res->name); - if (!res) { - dev_err(&pdev->dev, "cannot request I/O memory region\n"); - return -ENXIO; - } - - wdt->base = devm_ioremap_nocache(&pdev->dev, res->start, - resource_size(res)); - if (!wdt->base) { - dev_err(&pdev->dev, "cannot remap I/O memory region\n"); - return -ENXIO; - } - - /* disable watchdog and reboot on timeout */ - ctrl = readl(wdt->base + ADX_WDT_CONTROL); - ctrl &= ~ADX_WDT_CONTROL_ENABLE; - ctrl &= ~ADX_WDT_CONTROL_nRESET; - writel(ctrl, wdt->base + ADX_WDT_CONTROL); - - platform_set_drvdata(pdev, wdt); - adx_wdt_dev = pdev; - - ret = misc_register(&adx_wdt_miscdev); - if (ret) { - dev_err(&pdev->dev, "cannot register miscdev on minor %d " - "(err=%d)\n", WATCHDOG_MINOR, ret); - return ret; - } - - return 0; -} - -static int __devexit adx_wdt_remove(struct platform_device *pdev) -{ - struct adx_wdt *wdt = platform_get_drvdata(pdev); - - misc_deregister(&adx_wdt_miscdev); - adx_wdt_stop(wdt); - platform_set_drvdata(pdev, NULL); - - return 0; -} - -static void adx_wdt_shutdown(struct platform_device *pdev) -{ - struct adx_wdt *wdt = platform_get_drvdata(pdev); - adx_wdt_stop(wdt); -} - -#ifdef CONFIG_PM -static int adx_wdt_suspend(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct adx_wdt *wdt = platform_get_drvdata(pdev); - - wdt->wake = (wdt->state == WDT_STATE_START) ? 
1 : 0; - adx_wdt_stop(wdt); - - return 0; -} - -static int adx_wdt_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct adx_wdt *wdt = platform_get_drvdata(pdev); - - if (wdt->wake) - adx_wdt_start(wdt); - - return 0; -} - -static const struct dev_pm_ops adx_wdt_pm_ops = { - .suspend = adx_wdt_suspend, - .resume = adx_wdt_resume, -}; - -# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops) -#else -# define ADX_WDT_PM_OPS NULL -#endif - -static struct platform_driver adx_wdt_driver = { - .probe = adx_wdt_probe, - .remove = __devexit_p(adx_wdt_remove), - .shutdown = adx_wdt_shutdown, - .driver = { - .name = WATCHDOG_NAME, - .owner = THIS_MODULE, - .pm = ADX_WDT_PM_OPS, - }, -}; - -static int __init adx_wdt_init(void) -{ - return platform_driver_register(&adx_wdt_driver); -} - -static void __exit adx_wdt_exit(void) -{ - platform_driver_unregister(&adx_wdt_driver); -} - -module_init(adx_wdt_init); -module_exit(adx_wdt_exit); - -MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver"); -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); -MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 5de7e4fa5b8a..a79e3840782a 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -401,8 +401,8 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev) dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n", (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", - (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis", - (wtcon & S3C2410_WTCON_INTEN) ? "" : "en"); + (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis", + (wtcon & S3C2410_WTCON_INTEN) ? "en" : "dis"); return 0; diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index 7be38556aed0..e789a47db41f 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c @@ -150,7 +150,7 @@ static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev, if (wm831x_wdt_cfgs[i].time == timeout) break; if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) - ret = -EINVAL; + return -EINVAL; ret = wm831x_reg_unlock(wm831x); if (ret == 0) { diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index a767884a6c7a..31ab82fda38a 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(balloon_set_new_target); * alloc_xenballooned_pages - get pages that have been ballooned out * @nr_pages: Number of pages to get * @pages: pages returned - * @highmem: highmem or lowmem pages + * @highmem: allow highmem pages * @return 0 on success, error otherwise */ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) @@ -511,7 +511,7 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) mutex_lock(&balloon_mutex); while (pgno < nr_pages) { page = balloon_retrieve(highmem); - if (page && PageHighMem(page) == highmem) { + if (page && (highmem || !PageHighMem(page))) { pages[pgno++] = page; } else { enum bp_state st; diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index f6832f46aea4..e1c4c6e5b469 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -135,7 +135,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, /* Grant foreign access to the page. 
*/ gref->gref_id = gnttab_grant_foreign_access(op->domid, pfn_to_mfn(page_to_pfn(gref->page)), readonly); - if (gref->gref_id < 0) { + if ((int)gref->gref_id < 0) { rc = gref->gref_id; goto undo; } @@ -280,7 +280,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv, goto out; } - gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY); + gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY); if (!gref_ids) { rc = -ENOMEM; goto out; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 39871326afa2..afca14d9042e 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -114,11 +114,11 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) if (NULL == add) return NULL; - add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL); - add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL); - add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL); - add->kmap_ops = kzalloc(sizeof(add->kmap_ops[0]) * count, GFP_KERNEL); - add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL); + add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL); + add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL); + add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); + add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL); + add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 81c3ce6b8bbe..1906125eab49 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -35,6 +35,7 @@ #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/xen/hypervisor.h> +#include <asm/xen/page.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> #include <xen/events.h> @@ -436,19 +437,20 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn); int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) { struct gnttab_map_grant_ref op = { - .flags = GNTMAP_host_map, + .flags = GNTMAP_host_map | GNTMAP_contains_pte, .ref = gnt_ref, .dom = dev->otherend_id, }; struct vm_struct *area; + pte_t *pte; *vaddr = NULL; - area = alloc_vm_area(PAGE_SIZE); + area = alloc_vm_area(PAGE_SIZE, &pte); if (!area) return -ENOMEM; - op.host_addr = (unsigned long)area->addr; + op.host_addr = arbitrary_virt_to_machine(pte).maddr; if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) BUG(); @@ -527,6 +529,7 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) struct gnttab_unmap_grant_ref op = { .host_addr = (unsigned long)vaddr, }; + unsigned int level; /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr) * method so that we don't have to muck with vmalloc internals here. @@ -548,6 +551,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) } op.handle = (grant_handle_t)area->phys_addr; + op.host_addr = arbitrary_virt_to_machine( + lookup_address((unsigned long)vaddr, &level)).maddr; if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) BUG(); |
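The vp_reset() hunk in drivers/virtio/virtio_pci.c at the top of this section follows the status write with an ioread8() of the same register, so the posted write reaches the device (and any in-flight MSI-X interrupts are flushed in) before vp_synchronize_vectors() waits for pending callbacks. A minimal sketch of that read-back idiom, assuming a kernel build environment; my_chip_reset(), regs and the zero offset are illustrative placeholders, not taken from the driver:

#include <linux/io.h>

/* Posted MMIO writes may still be buffered when iowrite8() returns; a read
 * from the same device cannot complete until the write has landed, so the
 * read-back acts as a flush.  Meant to be called from a driver reset path. */
static void my_chip_reset(void __iomem *regs)
{
	iowrite8(0, regs + 0x00);	/* post the "reset" write */
	(void)ioread8(regs + 0x00);	/* read back to force it out */
}

Only once the read has returned is it safe to wait for or tear down interrupt handlers, which is why the hunk calls vp_synchronize_vectors() after the read rather than before.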
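The s3c2410_wdt hunk above fixes the arguments of the probe-time dev_info(): with the old strings, an enabled reset line was reported as "abled" and a disabled interrupt as "enabled". A standalone C sketch of the before/after output; the WTCON_* bit positions here are assumed for the demo, not copied from the S3C2410 header:

#include <stdio.h>

#define WTCON_ENABLE (1 << 5)	/* assumed demo values */
#define WTCON_RSTEN  (1 << 0)
#define WTCON_INTEN  (1 << 2)

int main(void)
{
	unsigned int wtcon = WTCON_ENABLE | WTCON_RSTEN;	/* reset on, irq off */

	/* old arguments: prints "reset abled, irq enabled" -- both wrong */
	printf("watchdog %sactive, reset %sabled, irq %sabled\n",
	       (wtcon & WTCON_ENABLE) ? "" : "in",
	       (wtcon & WTCON_RSTEN) ? "" : "dis",
	       (wtcon & WTCON_INTEN) ? "" : "en");

	/* fixed arguments: prints "reset enabled, irq disabled" */
	printf("watchdog %sactive, reset %sabled, irq %sabled\n",
	       (wtcon & WTCON_ENABLE) ? "" : "in",
	       (wtcon & WTCON_RSTEN) ? "en" : "dis",
	       (wtcon & WTCON_INTEN) ? "en" : "dis");
	return 0;
}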
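The wm831x_wdt hunk above replaces `ret = -EINVAL` with `return -EINVAL` when no entry of wm831x_wdt_cfgs matches the requested timeout: with the bare assignment the function kept going, so the error code was immediately overwritten by the wm831x_reg_unlock() call visible in the context and the unmatched index was still carried forward. A standalone sketch of the validate-then-bail pattern; the table contents are invented for this demo:

#include <errno.h>
#include <stdio.h>

/* Hypothetical table mirroring the wm831x_wdt_cfgs lookup. */
struct cfg {
	unsigned int time;
	unsigned int val;
};

static const struct cfg cfgs[] = {
	{ 1, 0x0 }, { 2, 0x1 }, { 4, 0x2 }, { 8, 0x3 },
};

#define N_CFGS (sizeof(cfgs) / sizeof(cfgs[0]))

static int set_timeout(unsigned int timeout)
{
	size_t i;

	for (i = 0; i < N_CFGS; i++)
		if (cfgs[i].time == timeout)
			break;
	if (i == N_CFGS)
		return -EINVAL;	/* bail out before cfgs[i] is ever used */

	printf("programming value 0x%x for %us\n", cfgs[i].val, timeout);
	return 0;
}

int main(void)
{
	printf("set_timeout(4)  -> %d\n", set_timeout(4));
	printf("set_timeout(13) -> %d\n", set_timeout(13));
	return 0;
}

The supported timeout returns 0; the unsupported one returns a negative error without ever touching the table past its end.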
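The alloc_xenballooned_pages() hunk above relaxes the acceptance test from `PageHighMem(page) == highmem` to `highmem || !PageHighMem(page)`: a caller that allows highmem now takes whichever kind of page balloon_retrieve() hands back, while a lowmem-only caller still rejects highmem pages. A standalone truth-table sketch of the two predicates, with a plain flag standing in for PageHighMem():

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* page_is_high stands in for PageHighMem(page); highmem is the caller's
	 * "allow highmem" argument from the hunk above. */
	for (int highmem = 0; highmem <= 1; highmem++) {
		for (int page_is_high = 0; page_is_high <= 1; page_is_high++) {
			bool old_test = (page_is_high == highmem);
			bool new_test = (highmem || !page_is_high);

			printf("highmem=%d page_is_high=%d old=%d new=%d\n",
			       highmem, page_is_high, old_test, new_test);
		}
	}
	return 0;
}

The only row that changes is highmem=1 with a lowmem page, which the old test wrongly sent down the error path.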
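The add_grefs() hunk in drivers/xen/gntalloc.c above casts gref->gref_id to int before testing it against zero: the field is an unsigned grant reference, so the original `gref->gref_id < 0` could never be true and a negative return from gnttab_grant_foreign_access() was silently treated as success. A standalone C sketch of the pitfall; the -12 value is just an example error code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The grant call returns a negative errno on failure, but the driver
	 * stores the result in an unsigned grant-reference field. */
	int err = -12;			/* example error code only */
	uint32_t gref_id = err;		/* wraps to a large positive value */

	printf("gref_id < 0      -> %d\n", gref_id < 0);	/* unsigned: always 0 */
	printf("(int)gref_id < 0 -> %d\n", (int)gref_id < 0);	/* 1: error visible again */
	return 0;
}

Compilers can flag the first comparison as always false (GCC's -Wtype-limits), which is how this class of bug is usually spotted.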
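The remaining gntalloc and gntdev hunks swap `kzalloc(sizeof(x[0]) * count, ...)` for `kcalloc(count, sizeof(x[0]), ...)`. Both zero the buffer, but kcalloc also checks the count-times-size multiplication for overflow instead of letting it wrap to a small allocation. The same distinction exists in userspace between malloc(n * size) and calloc(n, size); a standalone sketch, where SIZE_MAX / 2 + 2 is just a deliberately overflowing count:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t count = SIZE_MAX / 2 + 2;	/* deliberately overflowing element count */
	size_t size = 2;

	/* The open-coded multiply wraps around to a tiny value... */
	printf("count * size wraps to %zu bytes\n", count * size);

	/* ...so malloc(count * size) could "succeed" with far too little room,
	 * while calloc() spots that the product is not representable and fails. */
	void *p = calloc(count, size);
	printf("calloc(count, size) -> %p\n", p);
	free(p);
	return 0;
}

On common libc implementations the calloc() call returns NULL here, whereas the wrapped product would have asked malloc() for only 2 bytes.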