Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--   drivers/ata/sata_mv.c   486
1 file changed, 308 insertions, 178 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7e72463a90eb..3c1b5c9027db 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -29,7 +29,13 @@
 	I distinctly remember a couple workarounds (one related to PCI-X)
 	are still needed.
 
-	4) Add NCQ support (easy to intermediate, once new-EH support appears)
+	2) Improve/fix IRQ and error handling sequences.
+
+	3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
+
+	4) Think about TCQ support here, and for libata in general
+	with controllers that support it via host-queuing hardware
+	(a software-only implementation could be a nightmare).
 
 	5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 
@@ -53,8 +59,6 @@
 	Target mode, for those without docs, is the ability to directly
 	connect two SATA controllers.
 
-	13) Verify that 7042 is fully supported.  I only have a 6042.
-
 */
 
@@ -73,7 +77,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME        "sata_mv"
-#define DRV_VERSION     "1.01"
+#define DRV_VERSION     "1.20"
 
 enum {
         /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -107,14 +111,12 @@ enum {
 
         /* CRQB needs alignment on a 1KB boundary. Size == 1KB
          * CRPB needs alignment on a 256B boundary. Size == 256B
-         * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
          * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
          */
         MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
         MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
-        MV_MAX_SG_CT            = 176,
+        MV_MAX_SG_CT            = 256,
         MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
-        MV_PORT_PRIV_DMA_SZ     = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
 
         MV_PORTS_PER_HC         = 4,
         /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
@@ -125,6 +127,9 @@ enum {
         /* Host Flags */
         MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
         MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
+        /* SoC integrated controllers, no PCI interface */
+        MV_FLAG_SOC             = (1 << 28),
+
         MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                   ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                                   ATA_FLAG_PIO_POLLING,
@@ -170,7 +175,7 @@ enum {
 
         PCIE_IRQ_CAUSE_OFS      = 0x1900,
         PCIE_IRQ_MASK_OFS       = 0x1910,
-        PCIE_UNMASK_ALL_IRQS    = 0x70a,        /* assorted bits */
+        PCIE_UNMASK_ALL_IRQS    = 0x40a,        /* assorted bits */
 
         HC_MAIN_IRQ_CAUSE_OFS   = 0x1d60,
         HC_MAIN_IRQ_MASK_OFS    = 0x1d64,
@@ -210,6 +215,7 @@ enum {
         /* SATA registers */
         SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
         SATA_ACTIVE_OFS         = 0x350,
+        SATA_FIS_IRQ_CAUSE_OFS  = 0x364,
         PHY_MODE3               = 0x310,
         PHY_MODE4               = 0x314,
         PHY_MODE2               = 0x330,
@@ -222,11 +228,11 @@ enum {
 
         /* Port registers */
         EDMA_CFG_OFS            = 0,
-        EDMA_CFG_Q_DEPTH        = 0,            /* queueing disabled */
-        EDMA_CFG_NCQ            = (1 << 5),
-        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
-        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
-        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */
+        EDMA_CFG_Q_DEPTH        = 0x1f,         /* max device queue depth */
+        EDMA_CFG_NCQ            = (1 << 5),     /* for R/W FPDMA queued */
+        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
+        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
+        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */
 
         EDMA_ERR_IRQ_CAUSE_OFS  = 0x8,
         EDMA_ERR_IRQ_MASK_OFS   = 0xc,
@@ -244,14 +250,33 @@ enum {
         EDMA_ERR_CRPB_PAR       = (1 << 10),    /* CRPB parity error */
         EDMA_ERR_INTRL_PAR      = (1 << 11),    /* internal parity error */
         EDMA_ERR_IORDY          = (1 << 12),    /* IORdy timeout */
+        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),  /* link ctrl rx error */
-        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),
+        EDMA_ERR_LNK_CTRL_RX_0  = (1 << 13),    /* transient: CRC err */
+        EDMA_ERR_LNK_CTRL_RX_1  = (1 << 14),    /* transient: FIFO err */
+        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),    /* fatal: caught SYNC */
+        EDMA_ERR_LNK_CTRL_RX_3  = (1 << 16),    /* transient: FIS rx err */
+        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),  /* link data rx error */
+        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21), /* link ctrl tx error */
+        EDMA_ERR_LNK_CTRL_TX_0  = (1 << 21),    /* transient: CRC err */
+        EDMA_ERR_LNK_CTRL_TX_1  = (1 << 22),    /* transient: FIFO err */
+        EDMA_ERR_LNK_CTRL_TX_2  = (1 << 23),    /* transient: caught SYNC */
+        EDMA_ERR_LNK_CTRL_TX_3  = (1 << 24),    /* transient: caught DMAT */
+        EDMA_ERR_LNK_CTRL_TX_4  = (1 << 25),    /* transient: FIS collision */
+        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26), /* link data tx error */
+        EDMA_ERR_TRANS_PROTO    = (1 << 31),    /* transport protocol error */
         EDMA_ERR_OVERRUN_5      = (1 << 5),
         EDMA_ERR_UNDERRUN_5     = (1 << 6),
+
+        EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
+                                  EDMA_ERR_LNK_CTRL_RX_1 |
+                                  EDMA_ERR_LNK_CTRL_RX_3 |
+                                  EDMA_ERR_LNK_CTRL_TX,
+
         EDMA_EH_FREEZE          = EDMA_ERR_D_PAR |
                                   EDMA_ERR_PRD_PAR |
                                   EDMA_ERR_DEV_DCON |
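The EDMA_ERR_IRQ_TRANSIENT mask added above groups the link-layer bits that signal recoverable noise rather than real failures; a later hunk in this diff leaves exactly these bits masked in EDMA_ERR_IRQ_MASK_OFS. A minimal sketch of how such a composite mask separates fatal from ignorable causes (illustrative only, not the driver's exact control flow; port_mmio is the port register window as elsewhere in the driver):

        u32 cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        if (cause & ~EDMA_ERR_IRQ_TRANSIENT) {
                /* parity/disconnect/etc.: freeze the port and run EH */
        } else {
                /* only transient CRC/FIFO/FIS-rx noise: no EH needed */
        }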
@@ -311,12 +336,14 @@ enum {
 
         /* Port private flags (pp_flags) */
         MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
+        MV_PP_FLAG_NCQ_EN       = (1 << 1),     /* is EDMA set up for NCQ? */
         MV_PP_FLAG_HAD_A_RESET  = (1 << 2),     /* 1st hard reset complete? */
 };
 
 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
+#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
 
 enum {
         /* DMA boundary 0xffff is required by the s/g splitting
@@ -379,8 +406,8 @@ struct mv_port_priv {
         dma_addr_t              crqb_dma;
         struct mv_crpb          *crpb;
         dma_addr_t              crpb_dma;
-        struct mv_sg            *sg_tbl;
-        dma_addr_t              sg_tbl_dma;
+        struct mv_sg            *sg_tbl[MV_MAX_Q_DEPTH];
+        dma_addr_t              sg_tbl_dma[MV_MAX_Q_DEPTH];
 
         unsigned int            req_idx;
         unsigned int            resp_idx;
@@ -400,6 +427,14 @@ struct mv_host_priv {
         u32                     irq_cause_ofs;
         u32                     irq_mask_ofs;
         u32                     unmask_all_irqs;
+        /*
+         * These consistent DMA memory pools give us guaranteed
+         * alignment for hardware-accessed data structures,
+         * and less memory waste in accomplishing the alignment.
+         */
+        struct dma_pool         *crqb_pool;
+        struct dma_pool         *crpb_pool;
+        struct dma_pool         *sg_tbl_pool;
 };
 
 struct mv_hw_ops {
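The three struct dma_pool pointers replace the old single MV_PORT_PRIV_DMA_SZ chunk: a pool created with size equal to its alignment guarantees every allocation honours the hardware rule (1KB for the CRQB ring, 256B for the CRPB ring, and likewise for the SG tables). A hedged sketch of the pattern, mirroring mv_create_dma_pools() near the end of this diff (dev and pool here are illustrative locals):

        #include <linux/dmapool.h>

        /* blocks of MV_CRQB_Q_SZ bytes, aligned to MV_CRQB_Q_SZ */
        struct dma_pool *pool =
                dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
        dma_addr_t dma;
        void *crqb = dma_pool_alloc(pool, GFP_KERNEL, &dma);

        /* ... hand "dma" to the controller, use "crqb" from the CPU ... */
        dma_pool_free(pool, crqb, dma);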
@@ -411,7 +446,7 @@ struct mv_hw_ops {
         int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                         unsigned int n_hc);
         void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
-        void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
+        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
 };
 
 static void mv_irq_clear(struct ata_port *ap);
@@ -425,10 +460,9 @@
 static void mv_qc_prep(struct ata_queued_cmd *qc);
 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static void mv_error_handler(struct ata_port *ap);
-static void mv_post_int_cmd(struct ata_queued_cmd *qc);
 static void mv_eh_freeze(struct ata_port *ap);
 static void mv_eh_thaw(struct ata_port *ap);
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void mv6_dev_config(struct ata_device *dev);
 
 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                            unsigned int port);
@@ -438,7 +472,7 @@ static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                         unsigned int n_hc);
 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
-static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
 
 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                            unsigned int port);
@@ -448,10 +482,17 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                         unsigned int n_hc);
 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
-static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                              unsigned int port_no);
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+                        void __iomem *port_mmio, int want_ncq);
+static int __mv_stop_dma(struct ata_port *ap);
 
+/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
+ * because we have to allow room for worst case splitting of
+ * PRDs for 64K boundaries in mv_fill_sg().
+ */
 static struct scsi_host_template mv5_sht = {
         .module                 = THIS_MODULE,
         .name                   = DRV_NAME,
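The new comment above is the reason .sg_tablesize stays at MV_MAX_SG_CT / 2: mv_fill_sg() must split any segment that crosses a 64K boundary, so in the worst case each of the 128 advertised entries becomes two ePRDs, exactly filling the 256-slot table. A sketch of that splitting, condensed from what mv_fill_sg() does in the current source (not a verbatim excerpt):

        while (sg_len) {
                u32 offset = addr & 0xffff;
                u32 len = sg_len;

                if ((offset + sg_len) > 0x10000)  /* crosses 64K boundary? */
                        len = 0x10000 - offset;   /* stop at the boundary */

                mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                mv_sg->flags_size = cpu_to_le32(len & 0xffff);

                sg_len -= len;
                addr += len;
                mv_sg++;
        }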
@@ -475,7 +516,8 @@ static struct scsi_host_template mv6_sht = {
         .name                   = DRV_NAME,
         .ioctl                  = ata_scsi_ioctl,
         .queuecommand           = ata_scsi_queuecmd,
-        .can_queue              = ATA_DEF_QUEUE,
+        .change_queue_depth     = ata_scsi_change_queue_depth,
+        .can_queue              = MV_MAX_Q_DEPTH - 1,
         .this_id                = ATA_SHT_THIS_ID,
         .sg_tablesize           = MV_MAX_SG_CT / 2,
         .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
@@ -505,7 +547,6 @@ static const struct ata_port_operations mv5_ops = {
         .irq_on                 = ata_irq_on,
 
         .error_handler          = mv_error_handler,
-        .post_internal_cmd      = mv_post_int_cmd,
         .freeze                 = mv_eh_freeze,
         .thaw                   = mv_eh_thaw,
 
@@ -517,6 +558,7 @@ static const struct ata_port_operations mv5_ops = {
 };
 
 static const struct ata_port_operations mv6_ops = {
+        .dev_config             = mv6_dev_config,
         .tf_load                = ata_tf_load,
         .tf_read                = ata_tf_read,
         .check_status           = ata_check_status,
@@ -533,9 +575,9 @@ static const struct ata_port_operations mv6_ops = {
         .irq_on                 = ata_irq_on,
 
         .error_handler          = mv_error_handler,
-        .post_internal_cmd      = mv_post_int_cmd,
         .freeze                 = mv_eh_freeze,
         .thaw                   = mv_eh_thaw,
+        .qc_defer               = ata_std_qc_defer,
 
         .scr_read               = mv_scr_read,
         .scr_write              = mv_scr_write,
@@ -561,9 +603,9 @@ static const struct ata_port_operations mv_iie_ops = {
         .irq_on                 = ata_irq_on,
 
         .error_handler          = mv_error_handler,
-        .post_internal_cmd      = mv_post_int_cmd,
         .freeze                 = mv_eh_freeze,
         .thaw                   = mv_eh_thaw,
+        .qc_defer               = ata_std_qc_defer,
 
         .scr_read               = mv_scr_read,
         .scr_write              = mv_scr_write,
@@ -592,26 +634,29 @@ static const struct ata_port_info mv_port_info[] = {
                 .port_ops       = &mv5_ops,
         },
         {  /* chip_604x */
-                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+                                  ATA_FLAG_NCQ,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv6_ops,
         },
         {  /* chip_608x */
                 .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-                                  MV_FLAG_DUAL_HC,
+                                  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv6_ops,
         },
         {  /* chip_6042 */
-                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+                                  ATA_FLAG_NCQ,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv_iie_ops,
         },
         {  /* chip_7042 */
-                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+                                  ATA_FLAG_NCQ,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv_iie_ops,
@@ -648,13 +693,6 @@ static const struct pci_device_id mv_pci_tbl[] = {
         { }                     /* terminate list */
 };
 
-static struct pci_driver mv_pci_driver = {
-        .name                   = DRV_NAME,
-        .id_table               = mv_pci_tbl,
-        .probe                  = mv_init_one,
-        .remove                 = ata_pci_remove_one,
-};
-
 static const struct mv_hw_ops mv5xxx_ops = {
         .phy_errata             = mv5_phy_errata,
         .enable_leds            = mv5_enable_leds,
@@ -674,45 +712,6 @@ static const struct mv_hw_ops mv6xxx_ops = {
 };
 
 /*
- * module options
- */
-static int msi;       /* Use PCI msi; either zero (off, default) or non-zero */
-
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
-        int rc;
-
-        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-                if (rc) {
-                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
-                        if (rc) {
-                                dev_printk(KERN_ERR, &pdev->dev,
-                                           "64-bit DMA enable failed\n");
-                                return rc;
-                        }
-                }
-        } else {
-                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-                if (rc) {
-                        dev_printk(KERN_ERR, &pdev->dev,
-                                   "32-bit DMA enable failed\n");
-                        return rc;
-                }
-                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
-                if (rc) {
-                        dev_printk(KERN_ERR, &pdev->dev,
-                                   "32-bit consistent DMA enable failed\n");
-                        return rc;
-                }
-        }
-
-        return rc;
-}
-
-/*
  * Functions
  */
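Adding .qc_defer = ata_std_qc_defer (in the mv6_ops/mv_iie_ops hunks above) matters once ATA_FLAG_NCQ is advertised: libata then refuses to mix NCQ and non-NCQ commands on one link, deferring one kind while the other is in flight, which is what lets mv_start_dma() treat a protocol change as a safe point to stop and reprogram EDMA. Roughly, the libata-core helper looks like this (paraphrased from memory of the 2.6.24-era source, not an exact quote):

        int ata_std_qc_defer(struct ata_queued_cmd *qc)
        {
                struct ata_link *link = qc->dev->link;

                if (qc->tf.protocol == ATA_PROT_NCQ) {
                        if (!ata_tag_valid(link->active_tag))
                                return 0;       /* no non-NCQ cmd active */
                } else {
                        if (!ata_tag_valid(link->active_tag) && !link->sactive)
                                return 0;       /* link fully idle */
                }
                return ATA_DEFER_LINK;          /* wait for the link to drain */
        }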
@@ -815,19 +814,46 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
-                         struct mv_port_priv *pp)
+static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
+                         struct mv_port_priv *pp, u8 protocol)
 {
+        int want_ncq = (protocol == ATA_PROT_NCQ);
+
+        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
+                if (want_ncq != using_ncq)
+                        __mv_stop_dma(ap);
+        }
         if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+                struct mv_host_priv *hpriv = ap->host->private_data;
+                int hard_port = mv_hardport_from_port(ap->port_no);
+                void __iomem *hc_mmio = mv_hc_base_from_port(
+                                ap->host->iomap[MV_PRIMARY_BAR], hard_port);
+                u32 hc_irq_cause, ipending;
+
                 /* clear EDMA event indicators, if any */
-                writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
+                writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+                /* clear EDMA interrupt indicator, if any */
+                hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+                ipending = (DEV_IRQ << hard_port) |
+                                (CRPB_DMA_DONE << hard_port);
+                if (hc_irq_cause & ipending) {
+                        writelfl(hc_irq_cause & ~ipending,
+                                 hc_mmio + HC_IRQ_CAUSE_OFS);
+                }
+
+                mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+
+                /* clear FIS IRQ Cause */
+                writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
 
-                mv_set_edma_ptrs(base, hpriv, pp);
+                mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
-                writelfl(EDMA_EN, base + EDMA_CMD_OFS);
+                writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
         }
-        WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
+        WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
 }
 
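mv_start_dma() above locates its host-controller registers from the port number: each HC serves MV_PORTS_PER_HC (4) ports, so a global port number decomposes into an HC index and a per-HC "hard port", and the per-port IRQ bits are shifted by the latter. A small sketch of that arithmetic (local names here are illustrative):

        unsigned int hc        = port_no / MV_PORTS_PER_HC; /* which HC */
        unsigned int hard_port = port_no % MV_PORTS_PER_HC; /* port within HC */

        /* same value the hunk above builds with two separate shifts */
        u32 ipending = (DEV_IRQ | CRPB_DMA_DONE) << hard_port;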
 /**
@@ -1003,38 +1029,76 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
                 return -EINVAL;
 }
 
-static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
-                        void __iomem *port_mmio)
+static void mv6_dev_config(struct ata_device *adev)
 {
-        u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
+        /*
+         * We don't have hob_nsect when doing NCQ commands on Gen-II.
+         * See mv_qc_prep() for more info.
+         */
+        if (adev->flags & ATA_DFLAG_NCQ)
+                if (adev->max_sectors > ATA_MAX_SECTORS)
+                        adev->max_sectors = ATA_MAX_SECTORS;
+}
+
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+                        void __iomem *port_mmio, int want_ncq)
+{
+        u32 cfg;
 
         /* set up non-NCQ EDMA configuration */
-        cfg &= ~(1 << 9);       /* disable eQue */
+        cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */
 
-        if (IS_GEN_I(hpriv)) {
-                cfg &= ~0x1f;           /* clear queue depth */
+        if (IS_GEN_I(hpriv))
                 cfg |= (1 << 8);        /* enab config burst size mask */
-        }
 
-        else if (IS_GEN_II(hpriv)) {
-                cfg &= ~0x1f;           /* clear queue depth */
+        else if (IS_GEN_II(hpriv))
                 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
-                cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
-        }
 
         else if (IS_GEN_IIE(hpriv)) {
                 cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                 cfg |= (1 << 22);       /* enab 4-entry host queue cache */
-                cfg &= ~(1 << 19);      /* dis 128-entry queue (for now?) */
                 cfg |= (1 << 18);       /* enab early completion */
                 cfg |= (1 << 17);       /* enab cut-through (dis stor&forwrd) */
-                cfg &= ~(1 << 16);      /* dis FIS-based switching (for now) */
-                cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
         }
 
+        if (want_ncq) {
+                cfg |= EDMA_CFG_NCQ;
+                pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
+        } else
+                pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
+
         writelfl(cfg, port_mmio + EDMA_CFG_OFS);
 }
 
+static void mv_port_free_dma_mem(struct ata_port *ap)
+{
+        struct mv_host_priv *hpriv = ap->host->private_data;
+        struct mv_port_priv *pp = ap->private_data;
+        int tag;
+
+        if (pp->crqb) {
+                dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
+                pp->crqb = NULL;
+        }
+        if (pp->crpb) {
+                dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
+                pp->crpb = NULL;
+        }
+        /*
+         * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
+         * For later hardware, we have one unique sg_tbl per NCQ tag.
+         */
+        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+                if (pp->sg_tbl[tag]) {
+                        if (tag == 0 || !IS_GEN_I(hpriv))
+                                dma_pool_free(hpriv->sg_tbl_pool,
+                                              pp->sg_tbl[tag],
+                                              pp->sg_tbl_dma[tag]);
+                        pp->sg_tbl[tag] = NULL;
+                }
+        }
+}
+
 /**
  *      mv_port_start - Port specific init/start routine.
  *      @ap: ATA channel to manipulate
@@ -1051,51 +1115,47 @@
         struct mv_host_priv *hpriv = ap->host->private_data;
         struct mv_port_priv *pp;
         void __iomem *port_mmio = mv_ap_base(ap);
-        void *mem;
-        dma_addr_t mem_dma;
         unsigned long flags;
-        int rc;
+        int tag, rc;
 
         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
         if (!pp)
                 return -ENOMEM;
-
-        mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
-                                  GFP_KERNEL);
-        if (!mem)
-                return -ENOMEM;
-        memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
+        ap->private_data = pp;
 
         rc = ata_pad_alloc(ap, dev);
         if (rc)
                 return rc;
 
-        /* First item in chunk of DMA memory:
-         * 32-slot command request table (CRQB), 32 bytes each in size
-         */
-        pp->crqb = mem;
-        pp->crqb_dma = mem_dma;
-        mem += MV_CRQB_Q_SZ;
-        mem_dma += MV_CRQB_Q_SZ;
+        pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+        if (!pp->crqb)
+                return -ENOMEM;
+        memset(pp->crqb, 0, MV_CRQB_Q_SZ);
 
-        /* Second item:
-         * 32-slot command response table (CRPB), 8 bytes each in size
-         */
-        pp->crpb = mem;
-        pp->crpb_dma = mem_dma;
-        mem += MV_CRPB_Q_SZ;
-        mem_dma += MV_CRPB_Q_SZ;
+        pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+        if (!pp->crpb)
+                goto out_port_free_dma_mem;
+        memset(pp->crpb, 0, MV_CRPB_Q_SZ);
 
-        /* Third item:
-         * Table of scatter-gather descriptors (ePRD), 16 bytes each
+        /*
+         * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
+         * For later hardware, we need one unique sg_tbl per NCQ tag.
         */
-        pp->sg_tbl = mem;
-        pp->sg_tbl_dma = mem_dma;
+        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+                if (tag == 0 || !IS_GEN_I(hpriv)) {
+                        pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
+                                              GFP_KERNEL, &pp->sg_tbl_dma[tag]);
+                        if (!pp->sg_tbl[tag])
+                                goto out_port_free_dma_mem;
+                } else {
+                        pp->sg_tbl[tag] = pp->sg_tbl[0];
+                        pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
+                }
+        }
 
         spin_lock_irqsave(&ap->host->lock, flags);
 
-        mv_edma_cfg(ap, hpriv, port_mmio);
-
+        mv_edma_cfg(pp, hpriv, port_mmio, 0);
         mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
         spin_unlock_irqrestore(&ap->host->lock, flags);
@@ -1104,8 +1164,11 @@ static int mv_port_start(struct ata_port *ap)
          * we'll be unable to send non-data, PIO, etc due to restricted access
          * to shadow regs.
          */
-        ap->private_data = pp;
         return 0;
+
+out_port_free_dma_mem:
+        mv_port_free_dma_mem(ap);
+        return -ENOMEM;
 }
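With the per-tag allocation above, Gen-I chips (which never queue) simply alias every tag to the single table allocated for tag 0, so the fast paths can index by tag unconditionally. The payoff shows up in the next hunks; the lookup reduces to this (sketch; qc is the queued command as in mv_fill_sg() below):

        struct mv_port_priv *pp = qc->ap->private_data;
        struct mv_sg *sg_table  = pp->sg_tbl[qc->tag];      /* CPU view */
        dma_addr_t   sg_dma     = pp->sg_tbl_dma[qc->tag];  /* device view */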
@@ -1120,6 +1183,7 @@ static int mv_port_start(struct ata_port *ap)
 static void mv_port_stop(struct ata_port *ap)
 {
         mv_stop_dma(ap);
+        mv_port_free_dma_mem(ap);
 }
 
 /**
@@ -1138,7 +1202,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
         struct mv_sg *mv_sg, *last_sg = NULL;
         unsigned int si;
 
-        mv_sg = pp->sg_tbl;
+        mv_sg = pp->sg_tbl[qc->tag];
         for_each_sg(qc->sg, sg, qc->n_elem, si) {
                 dma_addr_t addr = sg_dma_address(sg);
                 u32 sg_len = sg_dma_len(sg);
@@ -1194,7 +1258,8 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
         u16 flags = 0;
         unsigned in_index;
 
-        if (qc->tf.protocol != ATA_PROT_DMA)
+        if ((qc->tf.protocol != ATA_PROT_DMA) &&
+            (qc->tf.protocol != ATA_PROT_NCQ))
                 return;
 
         /* Fill in command request block
@@ -1203,15 +1268,14 @@
                 flags |= CRQB_FLAG_READ;
         WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
         flags |= qc->tag << CRQB_TAG_SHIFT;
-        flags |= qc->tag << CRQB_IOID_SHIFT;    /* 50xx appears to ignore this*/
 
         /* get current queue index from software */
         in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
         pp->crqb[in_index].sg_addr =
-                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+                cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
         pp->crqb[in_index].sg_addr_hi =
-                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+                cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
         pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 
         cw = &pp->crqb[in_index].ata_cmd[0];
@@ -1231,13 +1295,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
         case ATA_CMD_WRITE_FUA_EXT:
                 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                 break;
-#ifdef LIBATA_NCQ               /* FIXME: remove this line when NCQ added */
         case ATA_CMD_FPDMA_READ:
         case ATA_CMD_FPDMA_WRITE:
                 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                 break;
-#endif                          /* FIXME: remove this line when NCQ added */
         default:
                 /* The only other commands EDMA supports in non-queued and
                  * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
@@ -1286,7 +1348,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
         unsigned in_index;
         u32 flags = 0;
 
-        if (qc->tf.protocol != ATA_PROT_DMA)
+        if ((qc->tf.protocol != ATA_PROT_DMA) &&
+            (qc->tf.protocol != ATA_PROT_NCQ))
                 return;
 
         /* Fill in Gen IIE command request block
@@ -1296,15 +1359,14 @@
 
         WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
         flags |= qc->tag << CRQB_TAG_SHIFT;
-        flags |= qc->tag << CRQB_IOID_SHIFT;    /* "I/O Id" is -really-
-                                                   what we use as our tag */
+        flags |= qc->tag << CRQB_HOSTQ_SHIFT;
 
         /* get current queue index from software */
         in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
         crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
-        crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
-        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+        crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
+        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
         crqb->flags = cpu_to_le32(flags);
 
         tf = &qc->tf;
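Both prep routines split the SG-table DMA address with "(x >> 16) >> 16" rather than "x >> 32". That is deliberate: dma_addr_t may be only 32 bits wide on some configurations, and shifting a 32-bit type by 32 is undefined in C, while two 16-bit shifts are well defined and simply yield 0 there. A hypothetical helper making the idiom explicit:

        /* high dword of a dma_addr_t, safe even when dma_addr_t is 32-bit */
        static inline u32 dma_hi32(dma_addr_t addr)
        {
                return (addr >> 16) >> 16;
        }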
@@ -1351,10 +1413,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
         struct ata_port *ap = qc->ap;
         void __iomem *port_mmio = mv_ap_base(ap);
         struct mv_port_priv *pp = ap->private_data;
-        struct mv_host_priv *hpriv = ap->host->private_data;
         u32 in_index;
 
-        if (qc->tf.protocol != ATA_PROT_DMA) {
+        if ((qc->tf.protocol != ATA_PROT_DMA) &&
+            (qc->tf.protocol != ATA_PROT_NCQ)) {
                 /* We're about to send a non-EDMA capable command to the
                  * port.  Turn off EDMA so there won't be problems accessing
                  * shadow block, etc registers.
@@ -1363,13 +1425,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
                 return ata_qc_issue_prot(qc);
         }
 
-        mv_start_dma(port_mmio, hpriv, pp);
-
-        in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
-
-        /* until we do queuing, the queue should be empty at this point */
-        WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
-                >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+        mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
 
         pp->req_idx++;
 
@@ -1437,6 +1493,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
                         ata_ehi_hotplugged(ehi);
                         ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
                                 "dev disconnect" : "dev connect");
+                        action |= ATA_EH_HARDRESET;
                 }
 
                 if (IS_GEN_I(hpriv)) {
@@ -1465,7 +1522,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
         }
 
         /* Clear EDMA now that SERR cleanup done */
-        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+        writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
         if (!err_mask) {
                 err_mask = AC_ERR_OTHER;
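The switch from writelfl(0, ...) to writelfl(~edma_err_cause, ...) narrows the acknowledgement: on these controllers the cause bits clear where a 0 is written, so writing the complement of what was just read clears only the handled bits and preserves anything that latched in between (my reading of the change; the datasheet wording may differ):

        u32 cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
        /* ... decode and handle the bits set in "cause" ... */
        writelfl(~cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); /* ack only those */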
@@ -1538,23 +1595,17 @@
                  * support for queueing.  this works transparently for
                  * queued and non-queued modes.
                  */
-                else if (IS_GEN_II(hpriv))
-                        tag = (le16_to_cpu(pp->crpb[out_index].id)
-                                >> CRPB_IOID_SHIFT_6) & 0x3f;
-
-                else /* IS_GEN_IIE */
-                        tag = (le16_to_cpu(pp->crpb[out_index].id)
-                                >> CRPB_IOID_SHIFT_7) & 0x3f;
+                else
+                        tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
 
                 qc = ata_qc_from_tag(ap, tag);
 
-                /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
-                 * bits (WARNING: might not necessarily be associated
-                 * with this command), which -should- be clear
-                 * if all is well
+                /* For non-NCQ mode, the lower 8 bits of status
+                 * are from EDMA_ERR_IRQ_CAUSE_OFS,
+                 * which should be zero if all went well.
                  */
                 status = le16_to_cpu(pp->crpb[out_index].flags);
-                if (unlikely(status & 0xff)) {
+                if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
                         mv_err_intr(ap, qc);
                         return;
                 }
@@ -1715,20 +1766,21 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
         struct ata_host *host = dev_instance;
         unsigned int hc, handled = 0, n_hcs;
         void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
-        u32 irq_stat;
+        u32 irq_stat, irq_mask;
 
+        spin_lock(&host->lock);
         irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
+        irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
 
         /* check the cases where we either have nothing pending or have read
          * a bogus register value which can indicate HW removal or PCI fault
          */
-        if (!irq_stat || (0xffffffffU == irq_stat))
-                return IRQ_NONE;
+        if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
+                goto out_unlock;
 
         n_hcs = mv_get_hc_count(host->ports[0]->flags);
-        spin_lock(&host->lock);
 
-        if (unlikely(irq_stat & PCI_ERR)) {
+        if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
                 mv_pci_error(host, mmio);
                 handled = 1;
                 goto out_unlock;        /* skip all other HC irq handling */
@@ -1799,8 +1851,9 @@ static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
         return -EINVAL;
 }
 
-static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
 {
+        struct pci_dev *pdev = to_pci_dev(host->dev);
         int early_5080;
 
         early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
@@ -1811,7 +1864,7 @@
                 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
         }
 
-        mv_reset_pci_bus(pdev, mmio);
+        mv_reset_pci_bus(host, mmio);
 }
 
 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
@@ -1935,9 +1988,8 @@
 #undef ZERO
 #define ZERO(reg) writel(0, mmio + (reg))
 
-static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
 {
-        struct ata_host     *host = dev_get_drvdata(&pdev->dev);
         struct mv_host_priv *hpriv = host->private_data;
         u32 tmp;
 
@@ -2329,11 +2381,6 @@ static void mv_error_handler(struct ata_port *ap)
                   mv_hardreset, mv_postreset);
 }
 
-static void mv_post_int_cmd(struct ata_queued_cmd *qc)
-{
-        mv_stop_dma(qc->ap);
-}
-
 static void mv_eh_freeze(struct ata_port *ap)
 {
         void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
@@ -2427,8 +2474,8 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
         writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
         writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-        /* unmask all EDMA error interrupts */
-        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
+        /* unmask all non-transient EDMA error interrupts */
+        writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
 
         VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                 readl(port_mmio + EDMA_CFG_OFS),
@@ -2586,7 +2633,6 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 {
         int rc = 0, n_hc, port, hc;
-        struct pci_dev *pdev = to_pci_dev(host->dev);
         void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
         struct mv_host_priv *hpriv = host->private_data;
 
@@ -2607,7 +2653,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
                 goto done;
 
         hpriv->ops->reset_flash(hpriv, mmio);
-        hpriv->ops->reset_bus(pdev, mmio);
+        hpriv->ops->reset_bus(host, mmio);
         hpriv->ops->enable_leds(hpriv, mmio);
 
         for (port = 0; port < host->n_ports; port++) {
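reset_bus() now receives the ata_host instead of a pci_dev, so the same mv_hw_ops table can serve SoC controllers that have no PCI function at all; PCI-only implementations recover the pci_dev themselves, as mv5_reset_bus() does above, and this dovetails with the HAS_PCI() gating in mv_interrupt(). A sketch of how a new implementation would follow the convention (example_reset_bus is a hypothetical name):

        static void example_reset_bus(struct ata_host *host, void __iomem *mmio)
        {
                struct pci_dev *pdev;

                if (!HAS_PCI(host))
                        return;         /* SoC variant: no PCI bus to touch */
                pdev = to_pci_dev(host->dev);
                /* ... PCI-specific resets via pdev and mmio ... */
        }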
@@ -2630,8 +2676,10 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 
                 mv_port_init(&ap->ioaddr, port_mmio);
 
+#ifdef CONFIG_PCI
                 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
                 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+#endif
         }
 
         for (hc = 0; hc < n_hc; hc++) {
@@ -2668,6 +2716,55 @@ done:
         return rc;
 }
 
+#ifdef CONFIG_PCI
+static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static struct pci_driver mv_pci_driver = {
+        .name                   = DRV_NAME,
+        .id_table               = mv_pci_tbl,
+        .probe                  = mv_init_one,
+        .remove                 = ata_pci_remove_one,
+};
+
+/*
+ * module options
+ */
+static int msi;       /* Use PCI msi; either zero (off, default) or non-zero */
+
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+        int rc;
+
+        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+                if (rc) {
+                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                        if (rc) {
+                                dev_printk(KERN_ERR, &pdev->dev,
+                                           "64-bit DMA enable failed\n");
+                                return rc;
+                        }
+                }
+        } else {
+                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+                if (rc) {
+                        dev_printk(KERN_ERR, &pdev->dev,
+                                   "32-bit DMA enable failed\n");
+                        return rc;
+                }
+                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+                if (rc) {
+                        dev_printk(KERN_ERR, &pdev->dev,
+                                   "32-bit consistent DMA enable failed\n");
+                        return rc;
+                }
+        }
+
+        return rc;
+}
+
 /**
  *      mv_print_info - Dump key info to kernel log for perusal.
  *      @host: ATA host to print info about
@@ -2710,6 +2807,26 @@ static void mv_print_info(struct ata_host *host)
                scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
 }
 
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
+        hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+                                                             MV_CRQB_Q_SZ, 0);
+        if (!hpriv->crqb_pool)
+                return -ENOMEM;
+
+        hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+                                                             MV_CRPB_Q_SZ, 0);
+        if (!hpriv->crpb_pool)
+                return -ENOMEM;
+
+        hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+                                                             MV_SG_TBL_SZ, 0);
+        if (!hpriv->sg_tbl_pool)
+                return -ENOMEM;
+
+        return 0;
+}
+
 /**
  *      mv_init_one - handle a positive probe of a Marvell host
  *      @pdev: PCI device found
@@ -2755,6 +2872,10 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         if (rc)
                 return rc;
 
+        rc = mv_create_dma_pools(hpriv, &pdev->dev);
+        if (rc)
+                return rc;
+
         /* initialize adapter */
         rc = mv_init_host(host, board_idx);
         if (rc)
@@ -2772,15 +2893,22 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
                                  IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
 }
+#endif
 
 static int __init mv_init(void)
 {
-        return pci_register_driver(&mv_pci_driver);
+        int rc = -ENODEV;
+#ifdef CONFIG_PCI
+        rc = pci_register_driver(&mv_pci_driver);
+#endif
+        return rc;
 }
 
 static void __exit mv_exit(void)
 {
+#ifdef CONFIG_PCI
         pci_unregister_driver(&mv_pci_driver);
+#endif
 }
 
 MODULE_AUTHOR("Brett Russ");
@@ -2789,8 +2917,10 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
+#ifdef CONFIG_PCI
 module_param(msi, int, 0444);
 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
+#endif
 
 module_init(mv_init);
 module_exit(mv_exit);