Diffstat (limited to 'drivers/scsi/megaraid')
-rw-r--r--   drivers/scsi/megaraid/megaraid_sas.h        |  199
-rw-r--r--   drivers/scsi/megaraid/megaraid_sas_base.c   |  648
-rw-r--r--   drivers/scsi/megaraid/megaraid_sas_fp.c     |  468
-rw-r--r--   drivers/scsi/megaraid/megaraid_sas_fusion.c | 1334
-rw-r--r--   drivers/scsi/megaraid/megaraid_sas_fusion.h |  412
5 files changed, 2328 insertions(+), 733 deletions(-)
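Two of the mechanisms the patch below adds are register decodes on Ventura controllers: megasas_init_fw() reads the maximum RAID map size from outbound_scratch_pad_3 and the NVMe page size from outbound_scratch_pad_4, using the new MR_MAX_RAID_MAP_SIZE_* and MR_NVME_PAGE_SIZE_MASK constants. A minimal standalone sketch of that decoding, with the constants copied from the diff; the sample register values are made up for illustration (the driver obtains the real values via readl()):

/* Standalone sketch, not driver code: decoding the Ventura scratch pad
 * registers the way megasas_init_fw() does in this patch.  Constants
 * are copied from the diff; register values are sample inputs. */
#include <stdio.h>

#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16
#define MR_MAX_RAID_MAP_SIZE_MASK         0x1FF
#define MR_MIN_MAP_SIZE                   0x10000 /* 64k */
#define MR_NVME_PAGE_SIZE_MASK            0x000000FF
#define MR_DEFAULT_NVME_PAGE_SHIFT        12

int main(void)
{
	unsigned int scratch_pad_3 = 0x00020000; /* sample readl() result */
	unsigned int scratch_pad_4 = 0x0000000c; /* sample readl() result */
	unsigned int max_raid_mapsize, nvme_page_size = 0;

	/* outbound_scratch_pad_3 carries the map size in 64k units */
	max_raid_mapsize = (scratch_pad_3 >>
			    MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
			   MR_MAX_RAID_MAP_SIZE_MASK;

	/* outbound_scratch_pad_4 carries log2 of the NVMe page size */
	if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
	    MR_DEFAULT_NVME_PAGE_SHIFT)
		nvme_page_size =
			1u << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);

	printf("max RAID map size: %u bytes\n",
	       max_raid_mapsize * MR_MIN_MAP_SIZE);
	printf("NVMe page size   : %u\n", nvme_page_size);
	return 0;
}

With the sample inputs above this prints a 128K (2 * 64k) map limit and a 4K NVMe page, matching the "NVME page size" message the patch logs at init time.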
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index fdd519c1dd57..e7e5974e1a2c 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,8 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.812.07.00-rc1" -#define MEGASAS_RELDATE "August 22, 2016" +#define MEGASAS_VERSION "07.701.16.00-rc1" +#define MEGASAS_RELDATE "February 2, 2017" /* * Device IDs @@ -56,6 +56,11 @@ #define PCI_DEVICE_ID_LSI_INTRUDER_24 0x00cf #define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052 #define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053 +#define PCI_DEVICE_ID_LSI_VENTURA 0x0014 +#define PCI_DEVICE_ID_LSI_HARPOON 0x0016 +#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017 +#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B +#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT 0x001C /* * Intel HBA SSDIDs @@ -100,7 +105,7 @@ */ /* - * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for + * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for * protocol between the software and firmware. Commands are issued using * "message frames" */ @@ -690,6 +695,18 @@ struct MR_PD_INFO { u8 reserved1[512-428]; } __packed; +/* + * Definition of structure used to expose attributes of VD or JBOD + * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP + * is fired by driver) + */ +struct MR_TARGET_PROPERTIES { + u32 max_io_size_kb; + u32 device_qdepth; + u32 sector_size; + u8 reserved[500]; +} __packed; + /* * defines the physical drive address structure */ @@ -728,7 +745,6 @@ struct megasas_pd_list { u16 tid; u8 driveType; u8 driveState; - u8 interface; } __packed; /* @@ -1312,7 +1328,55 @@ struct megasas_ctrl_info { #endif } adapterOperations3; - u8 pad[0x800-0x7EC]; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + /* Indicates whether the CPLD image is part of + * the package and stored in flash + */ + u8 cpld_in_flash:1; +#else + u8 cpld_in_flash:1; + u8 reserved:7; +#endif + u8 reserved1[3]; + /* Null terminated string. 
Has the version + * information if cpld_in_flash = FALSE + */ + u8 userCodeDefinition[12]; + } cpld; /* Valid only if upgradableCPLD is TRUE */ + + struct { + #if defined(__BIG_ENDIAN_BITFIELD) + u16 reserved:8; + u16 fw_swaps_bbu_vpd_info:1; + u16 support_pd_map_target_id:1; + u16 support_ses_ctrl_in_multipathcfg:1; + u16 image_upload_supported:1; + u16 support_encrypted_mfc:1; + u16 supported_enc_algo:1; + u16 support_ibutton_less:1; + u16 ctrl_info_ext_supported:1; + #else + + u16 ctrl_info_ext_supported:1; + u16 support_ibutton_less:1; + u16 supported_enc_algo:1; + u16 support_encrypted_mfc:1; + u16 image_upload_supported:1; + /* FW supports LUN based association and target port based */ + u16 support_ses_ctrl_in_multipathcfg:1; + /* association for the SES device connected in multipath mode */ + /* FW defines Jbod target Id within MR_PD_CFG_SEQ */ + u16 support_pd_map_target_id:1; + /* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to + * provide the data in little endian order + */ + u16 fw_swaps_bbu_vpd_info:1; + u16 reserved:8; + #endif + } adapter_operations4; + u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */ } __packed; /* @@ -1339,12 +1403,15 @@ struct megasas_ctrl_info { #define MEGASAS_FW_BUSY 1 -#define VD_EXT_DEBUG 0 +/* Driver's internal Logging levels*/ +#define OCR_LOGS (1 << 0) #define SCAN_PD_CHANNEL 0x1 #define SCAN_VD_CHANNEL 0x2 #define MEGASAS_KDUMP_QUEUE_DEPTH 100 +#define MR_LARGE_IO_MIN_SIZE (32 * 1024) +#define MR_R1_LDIO_PIGGYBACK_DEFAULT 4 enum MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, @@ -1391,7 +1458,7 @@ enum FW_BOOT_CONTEXT { */ #define MEGASAS_INT_CMDS 32 #define MEGASAS_SKINNY_INT_CMDS 5 -#define MEGASAS_FUSION_INTERNAL_CMDS 5 +#define MEGASAS_FUSION_INTERNAL_CMDS 8 #define MEGASAS_FUSION_IOCTL_CMDS 3 #define MEGASAS_MFI_IOCTL_CMDS 27 @@ -1429,13 +1496,19 @@ enum FW_BOOT_CONTEXT { #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 #define MR_MAX_MSIX_REG_ARRAY 16 #define MR_RDPQ_MODE_OFFSET 0X00800000 + +#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16 +#define MR_MAX_RAID_MAP_SIZE_MASK 0x1FF +#define MR_MIN_MAP_SIZE 0x10000 +/* 64k */ + #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000 /* * register set for both 1068 and 1078 controllers * structure extended for 1078 registers */ - + struct megasas_register_set { u32 doorbell; /*0000h*/ u32 fusion_seq_offset; /*0004h*/ @@ -1471,14 +1544,14 @@ struct megasas_register_set { u32 outbound_scratch_pad ; /*00B0h*/ u32 outbound_scratch_pad_2; /*00B4h*/ u32 outbound_scratch_pad_3; /*00B8h*/ + u32 outbound_scratch_pad_4; /*00BCh*/ - u32 reserved_4; /*00BCh*/ u32 inbound_low_queue_port ; /*00C0h*/ u32 inbound_high_queue_port ; /*00C4h*/ - u32 reserved_5; /*00C8h*/ + u32 inbound_single_queue_port; /*00C8h*/ u32 res_6[11]; /*CCh*/ u32 host_diag; u32 seq_offset; @@ -1544,33 +1617,35 @@ union megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:20; - u32 support_qd_throttling:1; - u32 support_fp_rlbypass:1; - u32 support_vfid_in_ioframe:1; - u32 support_ext_io_size:1; - u32 support_ext_queue_depth:1; - u32 security_protocol_cmds_fw:1; - u32 support_core_affinity:1; - u32 support_ndrive_r1_lb:1; - u32 support_max_255lds:1; - u32 support_fastpath_wb:1; - u32 support_additional_msix:1; - u32 support_fp_remote_lun:1; + u32 reserved:19; + u32 support_pd_map_target_id:1; + u32 support_qd_throttling:1; + u32 support_fp_rlbypass:1; + u32 support_vfid_in_ioframe:1; + u32 support_ext_io_size:1; + u32 support_ext_queue_depth:1; + u32 security_protocol_cmds_fw:1; 
+ u32 support_core_affinity:1; + u32 support_ndrive_r1_lb:1; + u32 support_max_255lds:1; + u32 support_fastpath_wb:1; + u32 support_additional_msix:1; + u32 support_fp_remote_lun:1; #else - u32 support_fp_remote_lun:1; - u32 support_additional_msix:1; - u32 support_fastpath_wb:1; - u32 support_max_255lds:1; - u32 support_ndrive_r1_lb:1; - u32 support_core_affinity:1; - u32 security_protocol_cmds_fw:1; - u32 support_ext_queue_depth:1; - u32 support_ext_io_size:1; - u32 support_vfid_in_ioframe:1; - u32 support_fp_rlbypass:1; - u32 support_qd_throttling:1; - u32 reserved:20; + u32 support_fp_remote_lun:1; + u32 support_additional_msix:1; + u32 support_fastpath_wb:1; + u32 support_max_255lds:1; + u32 support_ndrive_r1_lb:1; + u32 support_core_affinity:1; + u32 security_protocol_cmds_fw:1; + u32 support_ext_queue_depth:1; + u32 support_ext_io_size:1; + u32 support_vfid_in_ioframe:1; + u32 support_fp_rlbypass:1; + u32 support_qd_throttling:1; + u32 support_pd_map_target_id:1; + u32 reserved:19; #endif } mfi_capabilities; __le32 reg; @@ -1803,6 +1878,8 @@ union megasas_frame { struct MR_PRIV_DEVICE { bool is_tm_capable; bool tm_busy; + atomic_t r1_ldio_hint; + u8 interface_type; }; struct megasas_cmd; @@ -1994,17 +2071,24 @@ struct MR_DRV_SYSTEM_INFO { }; enum MR_PD_TYPE { - UNKNOWN_DRIVE = 0, - PARALLEL_SCSI = 1, - SAS_PD = 2, - SATA_PD = 3, - FC_PD = 4, + UNKNOWN_DRIVE = 0, + PARALLEL_SCSI = 1, + SAS_PD = 2, + SATA_PD = 3, + FC_PD = 4, + NVME_PD = 5, }; /* JBOD Queue depth definitions */ #define MEGASAS_SATA_QD 32 #define MEGASAS_SAS_QD 64 #define MEGASAS_DEFAULT_PD_QD 64 +#define MEGASAS_NVME_QD 32 + +#define MR_DEFAULT_NVME_PAGE_SIZE 4096 +#define MR_DEFAULT_NVME_PAGE_SHIFT 12 +#define MR_DEFAULT_NVME_MDTS_KB 128 +#define MR_NVME_PAGE_SIZE_MASK 0x000000FF struct megasas_instance { @@ -2022,6 +2106,8 @@ struct megasas_instance { dma_addr_t hb_host_mem_h; struct MR_PD_INFO *pd_info; dma_addr_t pd_info_h; + struct MR_TARGET_PROPERTIES *tgt_prop; + dma_addr_t tgt_prop_h; __le32 *reply_queue; dma_addr_t reply_queue_h; @@ -2039,6 +2125,7 @@ struct megasas_instance { u32 crash_dump_drv_support; u32 crash_dump_app_support; u32 secure_jbod_support; + u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */ bool use_seqnum_jbod_fp; /* Added for PD sequence */ spinlock_t crashdump_lock; @@ -2051,6 +2138,7 @@ struct megasas_instance { u16 max_num_sge; u16 max_fw_cmds; + u16 max_mpt_cmds; u16 max_mfi_cmds; u16 max_scsi_cmds; u16 ldio_threshold; @@ -2065,6 +2153,7 @@ struct megasas_instance { /* used to sync fire the cmd to fw */ spinlock_t hba_lock; /* used to synch producer, consumer ptrs in dpc */ + spinlock_t stream_lock; spinlock_t completion_lock; struct dma_pool *frame_dma_pool; struct dma_pool *sense_dma_pool; @@ -2087,6 +2176,11 @@ struct megasas_instance { atomic_t fw_outstanding; atomic_t ldio_outstanding; atomic_t fw_reset_no_pci_access; + atomic_t ieee_sgl; + atomic_t prp_sgl; + atomic_t sge_holes_type1; + atomic_t sge_holes_type2; + atomic_t sge_holes_type3; struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; @@ -2142,6 +2236,13 @@ struct megasas_instance { u8 is_rdpq; bool dev_handle; bool fw_sync_cache_support; + u32 mfi_frame_size; + bool is_ventura; + bool msix_combined; + u16 max_raid_mapsize; + /* preffered count to send as LDIO irrspective of FP capable.*/ + u8 r1_ldio_hint_default; + u32 nvme_page_size; }; struct MR_LD_VF_MAP { u32 size; @@ -2230,12 +2331,12 @@ struct megasas_instance_template { u32 (*init_adapter)(struct megasas_instance *); 
u32 (*build_and_issue_cmd) (struct megasas_instance *, struct scsi_cmnd *); - int (*issue_dcmd)(struct megasas_instance *instance, + void (*issue_dcmd)(struct megasas_instance *instance, struct megasas_cmd *cmd); }; -#define MEGASAS_IS_LOGICAL(scp) \ - ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) +#define MEGASAS_IS_LOGICAL(sdev) \ + ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ @@ -2346,7 +2447,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN); -u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); @@ -2354,13 +2455,16 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); __le16 get_updated_dev_handle(struct megasas_instance *instance, - struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *in_info, + struct MR_DRV_RAID_MAP_ALL *drv_map); void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); int megasas_get_ctrl_info(struct megasas_instance *instance); /* PD sequence */ int megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); +void megasas_set_dynamic_target_properties(struct scsi_device *sdev); int megasas_set_crash_dump_params(struct megasas_instance *instance, u8 crash_buf_state); void megasas_free_host_crash_buffer(struct megasas_instance *instance); @@ -2382,4 +2486,7 @@ void megasas_update_sdev_properties(struct scsi_device *sdev); int megasas_reset_fusion(struct Scsi_Host *shost, int reason); int megasas_task_abort_fusion(struct scsi_cmnd *scmd); int megasas_reset_target_fusion(struct scsi_cmnd *scmd); +u32 mega_mod64(u64 dividend, u32 divisor); +int megasas_alloc_fusion_context(struct megasas_instance *instance); +void megasas_free_fusion_context(struct megasas_instance *instance); #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d5cf15eb8c5e..7ac9a9ee9bd4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -43,6 +43,7 @@ #include <linux/uio.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <asm/unaligned.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> @@ -116,8 +117,10 @@ static int megasas_ld_list_query(struct megasas_instance *instance, static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); -static int -megasas_get_pd_info(struct megasas_instance *instance, u16 device_id); +static void megasas_get_pd_info(struct megasas_instance *instance, + struct scsi_device *sdev); +static int megasas_get_target_prop(struct megasas_instance *instance, + struct scsi_device *sdev); /* * PCI ID table for all supported controllers */ @@ -155,6 +158,12 @@ static struct pci_device_id megasas_pci_table[] = { /* Intruder 24 port*/ 
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)}, {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)}, + /* VENTURA */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)}, + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)}, {} }; @@ -196,12 +205,12 @@ void megasas_fusion_ocr_wq(struct work_struct *work); static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, int initial); -int +void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); - return 0; + return; } /** @@ -259,6 +268,8 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) cmd->scmd = NULL; cmd->frame_count = 0; cmd->flags = 0; + memset(cmd->frame, 0, instance->mfi_frame_size); + cmd->frame->io.context = cpu_to_le32(cmd->index); if (!fusion && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add(&cmd->list, (&instance->cmd_pool)->next); @@ -989,13 +1000,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS; frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); - if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || - (instance->instancet->issue_dcmd(instance, cmd))) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } + instance->instancet->issue_dcmd(instance, cmd); + return wait_and_poll(instance, cmd, instance->requestorId ? 
MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS); } @@ -1017,13 +1029,14 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, int ret = 0; cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || - (instance->instancet->issue_dcmd(instance, cmd))) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } + instance->instancet->issue_dcmd(instance, cmd); + if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); @@ -1081,13 +1094,14 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cmd->sync_cmd = 1; cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) || - (instance->instancet->issue_dcmd(instance, cmd))) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return DCMD_NOT_FIRED; } + instance->instancet->issue_dcmd(instance, cmd); + if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); @@ -1273,7 +1287,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, u16 flags = 0; struct megasas_pthru_frame *pthru; - is_logical = MEGASAS_IS_LOGICAL(scp); + is_logical = MEGASAS_IS_LOGICAL(scp->device); device_id = MEGASAS_DEV_INDEX(scp); pthru = (struct megasas_pthru_frame *)cmd->frame; @@ -1513,11 +1527,11 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd) case WRITE_6: case READ_16: case WRITE_16: - ret = (MEGASAS_IS_LOGICAL(cmd)) ? + ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? READ_WRITE_LDIO : READ_WRITE_SYSPDIO; break; default: - ret = (MEGASAS_IS_LOGICAL(cmd)) ? + ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? 
NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; } return ret; @@ -1537,7 +1551,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) struct megasas_io_frame *ldio; struct megasas_pthru_frame *pthru; u32 sgcount; - u32 max_cmd = instance->max_fw_cmds; + u16 max_cmd = instance->max_fw_cmds; dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no); dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding)); @@ -1662,7 +1676,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) /* Check for an mpio path and adjust behavior */ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) { if (megasas_check_mpio_paths(instance, scmd) == - (DID_RESET << 16)) { + (DID_REQUEUE << 16)) { return SCSI_MLQUEUE_HOST_BUSY; } else { scmd->result = DID_NO_CONNECT << 16; @@ -1693,15 +1707,16 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) scmd->result = 0; - if (MEGASAS_IS_LOGICAL(scmd) && + if (MEGASAS_IS_LOGICAL(scmd->device) && (scmd->device->id >= instance->fw_supported_vd_count || scmd->device->lun)) { scmd->result = DID_BAD_TARGET << 16; goto out_done; } - if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) && - (!instance->fw_sync_cache_support)) { + if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && + MEGASAS_IS_LOGICAL(scmd->device) && + (!instance->fw_sync_cache_support)) { scmd->result = DID_OK << 16; goto out_done; } @@ -1728,16 +1743,21 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no) } /* -* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities +* megasas_set_dynamic_target_properties - +* Device property set by driver may not be static and it is required to be +* updated after OCR +* +* set tm_capable. +* set dma alignment (only for eedp protection enable vd). 
* * @sdev: OS provided scsi device * * Returns void */ -void megasas_update_sdev_properties(struct scsi_device *sdev) +void megasas_set_dynamic_target_properties(struct scsi_device *sdev) { - u16 pd_index = 0; - u32 device_id, ld; + u16 pd_index = 0, ld; + u32 device_id; struct megasas_instance *instance; struct fusion_context *fusion; struct MR_PRIV_DEVICE *mr_device_priv_data; @@ -1749,67 +1769,129 @@ void megasas_update_sdev_properties(struct scsi_device *sdev) fusion = instance->ctrl_context; mr_device_priv_data = sdev->hostdata; - if (!fusion) + if (!fusion || !mr_device_priv_data) return; - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && - instance->use_seqnum_jbod_fp) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + - sdev->id; - pd_sync = (void *)fusion->pd_seq_sync - [(instance->pd_seq_map_id - 1) & 1]; - mr_device_priv_data->is_tm_capable = - pd_sync->seq[pd_index].capability.tmCapable; - } else { + if (MEGASAS_IS_LOGICAL(sdev)) { device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + if (ld >= instance->fw_supported_vd_count) + return; raid = MR_LdRaidGet(ld, local_map_ptr); if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) blk_queue_update_dma_alignment(sdev->request_queue, 0x7); + mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; + } else if (instance->use_seqnum_jbod_fp) { + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + [(instance->pd_seq_map_id - 1) & 1]; + mr_device_priv_data->is_tm_capable = + pd_sync->seq[pd_index].capability.tmCapable; } } -static void megasas_set_device_queue_depth(struct scsi_device *sdev) +/* + * megasas_set_nvme_device_properties - + * set nomerges=2 + * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K). + * set maximum io transfer = MDTS of NVME device provided by MR firmware. + * + * MR firmware provides value in KB. Caller of this function converts + * kb into bytes. + * + * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size, + * MR firmware provides value 128 as (32 * 4K) = 128K. + * + * @sdev: scsi device + * @max_io_size: maximum io transfer size + * + */ +static inline void +megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) { - u16 pd_index = 0; - int ret = DCMD_FAILED; struct megasas_instance *instance; + u32 mr_nvme_pg_size; - instance = megasas_lookup_instance(sdev->host->host_no); + instance = (struct megasas_instance *)sdev->host->hostdata; + mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; + blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); - if (instance->pd_info) { - mutex_lock(&instance->hba_mutex); - ret = megasas_get_pd_info(instance, pd_index); - mutex_unlock(&instance->hba_mutex); - } + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue); + blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); +} - if (ret != DCMD_SUCCESS) - return; - if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { +/* + * megasas_set_static_target_properties - + * Device property set by driver are static and it is not required to be + * updated after OCR. + * + * set io timeout + * set device queue depth + * set nvme device properties. 
see - megasas_set_nvme_device_properties + * + * @sdev: scsi device + * @is_target_prop true, if fw provided target properties. + */ +static void megasas_set_static_target_properties(struct scsi_device *sdev, + bool is_target_prop) +{ + u16 target_index = 0; + u8 interface_type; + u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; + u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; + u32 tgt_device_qd; + struct megasas_instance *instance; + struct MR_PRIV_DEVICE *mr_device_priv_data; - switch (instance->pd_list[pd_index].interface) { - case SAS_PD: - scsi_change_queue_depth(sdev, MEGASAS_SAS_QD); - break; + instance = megasas_lookup_instance(sdev->host->host_no); + mr_device_priv_data = sdev->hostdata; + interface_type = mr_device_priv_data->interface_type; - case SATA_PD: - scsi_change_queue_depth(sdev, MEGASAS_SATA_QD); - break; + /* + * The RAID firmware may require extended timeouts. + */ + blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); - default: - scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD); - } - } + target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; + + switch (interface_type) { + case SAS_PD: + device_qd = MEGASAS_SAS_QD; + break; + case SATA_PD: + device_qd = MEGASAS_SATA_QD; + break; + case NVME_PD: + device_qd = MEGASAS_NVME_QD; + break; + } + + if (is_target_prop) { + tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth); + if (tgt_device_qd && + (tgt_device_qd <= instance->host->can_queue)) + device_qd = tgt_device_qd; + + /* max_io_size_kb will be set to non zero for + * nvme based vd and syspd. + */ + max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); } + + if (instance->nvme_page_size && max_io_size_kb) + megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); + + scsi_change_queue_depth(sdev, device_qd); + } @@ -1817,11 +1899,12 @@ static int megasas_slave_configure(struct scsi_device *sdev) { u16 pd_index = 0; struct megasas_instance *instance; + int ret_target_prop = DCMD_FAILED; + bool is_target_prop = false; instance = megasas_lookup_instance(sdev->host->host_no); if (instance->pd_list_not_supported) { - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && - sdev->type == TYPE_DISK) { + if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; if (instance->pd_list[pd_index].driveState != @@ -1829,14 +1912,25 @@ static int megasas_slave_configure(struct scsi_device *sdev) return -ENXIO; } } - megasas_set_device_queue_depth(sdev); - megasas_update_sdev_properties(sdev); - /* - * The RAID firmware may require extended timeouts. + mutex_lock(&instance->hba_mutex); + /* Send DCMD to Firmware and cache the information */ + if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev)) + megasas_get_pd_info(instance, sdev); + + /* Some ventura firmware may not have instance->nvme_page_size set. + * Do not send MR_DCMD_DRV_GET_TARGET_PROP */ - blk_queue_rq_timeout(sdev->request_queue, - scmd_timeout * HZ); + if ((instance->tgt_prop) && (instance->nvme_page_size)) + ret_target_prop = megasas_get_target_prop(instance, sdev); + + is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; + megasas_set_static_target_properties(sdev, is_target_prop); + + mutex_unlock(&instance->hba_mutex); + + /* This sdev property may change post OCR */ + megasas_set_dynamic_target_properties(sdev); return 0; } @@ -1848,7 +1942,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev) struct MR_PRIV_DEVICE *mr_device_priv_data; instance = megasas_lookup_instance(sdev->host->host_no); - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { + if (!MEGASAS_IS_LOGICAL(sdev)) { /* * Open the OS scan to the SYSTEM PD */ @@ -2483,7 +2577,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) struct megasas_cmd, list); list_del_init(&reset_cmd->list); if (reset_cmd->scmd) { - reset_cmd->scmd->result = DID_RESET << 16; + reset_cmd->scmd->result = DID_REQUEUE << 16; dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n", reset_index, reset_cmd, reset_cmd->scmd->cmnd[0]); @@ -2651,6 +2745,24 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) } /** + * megasas_dump_frame - This function will dump MPT/MFI frame + */ +static inline void +megasas_dump_frame(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + printk(KERN_INFO "IO request frame:\n\t"); + for (i = 0; i < sz / sizeof(__le32); i++) { + if (i && ((i % 8) == 0)) + printk("\n\t"); + printk("%08x ", le32_to_cpu(mfp[i])); + } + printk("\n"); +} + +/** * megasas_reset_bus_host - Bus & host reset handler entry point */ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) @@ -2660,12 +2772,26 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; + scmd_printk(KERN_INFO, scmd, + "Controller reset is requested due to IO timeout\n" + "SCSI command pointer: (%p)\t SCSI host state: %d\t" + " SCSI host busy: %d\t FW outstanding: %d\n", + scmd, scmd->device->host->shost_state, + atomic_read((atomic_t *)&scmd->device->host->host_busy), + atomic_read(&instance->fw_outstanding)); + /* * First wait for all commands to complete */ - if (instance->ctrl_context) - ret = megasas_reset_fusion(scmd->device->host, 1); - else + if (instance->ctrl_context) { + struct megasas_cmd_fusion *cmd; + cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr; + if (cmd) + megasas_dump_frame(cmd->io_request, + sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + ret = megasas_reset_fusion(scmd->device->host, + SCSIIO_TIMEOUT_OCR); + } else ret = megasas_generic_reset(scmd); return ret; @@ -3343,7 +3469,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance) { struct megasas_cmd *cmd; int i; - u32 max_cmd = instance->max_fw_cmds; + u16 max_cmd = instance->max_fw_cmds; u32 defer_index; unsigned long flags; @@ -3719,7 +3845,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) static void megasas_teardown_frame_pool(struct megasas_instance *instance) { int i; - u32 max_cmd = instance->max_mfi_cmds; + u16 max_cmd = instance->max_mfi_cmds; struct megasas_cmd *cmd; if (!instance->frame_dma_pool) @@ -3763,9 +3889,8 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance) static int megasas_create_frame_pool(struct megasas_instance *instance) { int i; - u32 max_cmd; + u16 max_cmd; u32 sge_sz; - u32 total_sz; u32 frame_count; struct megasas_cmd *cmd; @@ -3793,12 +3918,13 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) * Total 192 byte (3 MFI frame of 64 byte) */ frame_count = instance->ctrl_context ? 
(3 + 1) : (15 + 1); - total_sz = MEGAMFI_FRAME_SIZE * frame_count; + instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count; /* * Use DMA pool facility provided by PCI layer */ instance->frame_dma_pool = pci_pool_create("megasas frame pool", - instance->pdev, total_sz, 256, 0); + instance->pdev, instance->mfi_frame_size, + 256, 0); if (!instance->frame_dma_pool) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n"); @@ -3842,7 +3968,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) return -ENOMEM; } - memset(cmd->frame, 0, total_sz); + memset(cmd->frame, 0, instance->mfi_frame_size); cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; if (!instance->ctrl_context && reset_devices) @@ -3897,7 +4023,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance) { int i; int j; - u32 max_cmd; + u16 max_cmd; struct megasas_cmd *cmd; struct fusion_context *fusion; @@ -3974,18 +4100,22 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) { return INITIATE_OCR; } -static int -megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) +static void +megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev) { int ret; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; + struct MR_PRIV_DEVICE *mr_device_priv_data; + u16 device_id = 0; + + device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; cmd = megasas_get_cmd(instance); if (!cmd) { dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__); - return -ENOMEM; + return; } dcmd = &cmd->frame->dcmd; @@ -4012,7 +4142,9 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) switch (ret) { case DCMD_SUCCESS: - instance->pd_list[device_id].interface = + mr_device_priv_data = sdev->hostdata; + le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType); + mr_device_priv_data->interface_type = instance->pd_info->state.ddf.pdType.intf; break; @@ -4039,7 +4171,7 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id) if (ret != DCMD_TIMEOUT) megasas_return_cmd(instance, cmd); - return ret; + return; } /* * megasas_get_pd_list_info - Returns FW's pd_list structure @@ -4418,8 +4550,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) static void megasas_update_ext_vd_details(struct megasas_instance *instance) { struct fusion_context *fusion; - u32 old_map_sz; - u32 new_map_sz; + u32 ventura_map_sz = 0; fusion = instance->ctrl_context; /* For MFI based controllers return dummy success */ @@ -4449,21 +4580,27 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance) instance->supportmax256vd ? 
"Extended VD(240 VD)firmware" : "Legacy(64 VD) firmware"); - old_map_sz = sizeof(struct MR_FW_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) * - (instance->fw_supported_vd_count - 1)); - new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); - fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) * - (instance->drv_supported_vd_count - 1)); - - fusion->max_map_sz = max(old_map_sz, new_map_sz); + if (instance->max_raid_mapsize) { + ventura_map_sz = instance->max_raid_mapsize * + MR_MIN_MAP_SIZE; /* 64k */ + fusion->current_map_sz = ventura_map_sz; + fusion->max_map_sz = ventura_map_sz; + } else { + fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) + + (sizeof(struct MR_LD_SPAN_MAP) * + (instance->fw_supported_vd_count - 1)); + fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); + fusion->max_map_sz = + max(fusion->old_map_sz, fusion->new_map_sz); - if (instance->supportmax256vd) - fusion->current_map_sz = new_map_sz; - else - fusion->current_map_sz = old_map_sz; + if (instance->supportmax256vd) + fusion->current_map_sz = fusion->new_map_sz; + else + fusion->current_map_sz = fusion->old_map_sz; + } + /* irrespective of FW raid maps, driver raid map is constant */ + fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL); } /** @@ -4533,6 +4670,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance) le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); + le16_to_cpus((u16 *)&ctrl_info->adapter_operations4); /* Update the latest Ext VD info. * From Init path, store current firmware details. @@ -4542,6 +4680,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance) megasas_update_ext_vd_details(instance); instance->use_seqnum_jbod_fp = ctrl_info->adapterOperations3.useSeqNumJbodFP; + instance->support_morethan256jbod = + ctrl_info->adapter_operations4.support_pd_map_target_id; /*Check whether controller is iMR or MR */ instance->is_imr = (ctrl_info->memory_size ? 
0 : 1); @@ -4989,13 +5129,13 @@ skip_alloc: static int megasas_init_fw(struct megasas_instance *instance) { u32 max_sectors_1; - u32 max_sectors_2; - u32 tmp_sectors, msix_enable, scratch_pad_2; + u32 max_sectors_2, tmp_sectors, msix_enable; + u32 scratch_pad_2, scratch_pad_3, scratch_pad_4; resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info = NULL; unsigned long bar_list; - int i, loop, fw_msix_count = 0; + int i, j, loop, fw_msix_count = 0; struct IOV_111 *iovPtr; struct fusion_context *fusion; @@ -5020,34 +5160,29 @@ static int megasas_init_fw(struct megasas_instance *instance) reg_set = instance->reg_set; - switch (instance->pdev->device) { - case PCI_DEVICE_ID_LSI_FUSION: - case PCI_DEVICE_ID_LSI_PLASMA: - case PCI_DEVICE_ID_LSI_INVADER: - case PCI_DEVICE_ID_LSI_FURY: - case PCI_DEVICE_ID_LSI_INTRUDER: - case PCI_DEVICE_ID_LSI_INTRUDER_24: - case PCI_DEVICE_ID_LSI_CUTLASS_52: - case PCI_DEVICE_ID_LSI_CUTLASS_53: + if (fusion) instance->instancet = &megasas_instance_template_fusion; - break; - case PCI_DEVICE_ID_LSI_SAS1078R: - case PCI_DEVICE_ID_LSI_SAS1078DE: - instance->instancet = &megasas_instance_template_ppc; - break; - case PCI_DEVICE_ID_LSI_SAS1078GEN2: - case PCI_DEVICE_ID_LSI_SAS0079GEN2: - instance->instancet = &megasas_instance_template_gen2; - break; - case PCI_DEVICE_ID_LSI_SAS0073SKINNY: - case PCI_DEVICE_ID_LSI_SAS0071SKINNY: - instance->instancet = &megasas_instance_template_skinny; - break; - case PCI_DEVICE_ID_LSI_SAS1064R: - case PCI_DEVICE_ID_DELL_PERC5: - default: - instance->instancet = &megasas_instance_template_xscale; - break; + else { + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_SAS1078R: + case PCI_DEVICE_ID_LSI_SAS1078DE: + instance->instancet = &megasas_instance_template_ppc; + break; + case PCI_DEVICE_ID_LSI_SAS1078GEN2: + case PCI_DEVICE_ID_LSI_SAS0079GEN2: + instance->instancet = &megasas_instance_template_gen2; + break; + case PCI_DEVICE_ID_LSI_SAS0073SKINNY: + case PCI_DEVICE_ID_LSI_SAS0071SKINNY: + instance->instancet = &megasas_instance_template_skinny; + break; + case PCI_DEVICE_ID_LSI_SAS1064R: + case PCI_DEVICE_ID_DELL_PERC5: + default: + instance->instancet = &megasas_instance_template_xscale; + instance->pd_list_not_supported = 1; + break; + } } if (megasas_transition_to_ready(instance, 0)) { @@ -5066,13 +5201,13 @@ static int megasas_init_fw(struct megasas_instance *instance) goto fail_ready_state; } - /* - * MSI-X host index 0 is common for all adapter. - * It is used for all MPT based Adapters. - */ - instance->reply_post_host_index_addr[0] = - (u32 __iomem *)((u8 __iomem *)instance->reg_set + - MPI2_REPLY_POST_HOST_INDEX_OFFSET); + if (instance->is_ventura) { + scratch_pad_3 = + readl(&instance->reg_set->outbound_scratch_pad_3); + instance->max_raid_mapsize = ((scratch_pad_3 >> + MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) & + MR_MAX_RAID_MAP_SIZE_MASK); + } /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & @@ -5092,6 +5227,9 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; + if (instance->msix_vectors > 16) + instance->msix_combined = true; + if (rdpq_enable) instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 
1 : 0; @@ -5125,6 +5263,20 @@ static int megasas_init_fw(struct megasas_instance *instance) else instance->msix_vectors = 0; } + /* + * MSI-X host index 0 is common for all adapter. + * It is used for all MPT based Adapters. + */ + if (instance->msix_combined) { + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET); + } else { + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_REPLY_POST_HOST_INDEX_OFFSET); + } + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); if (i < 0) goto fail_setup_irqs; @@ -5155,6 +5307,18 @@ static int megasas_init_fw(struct megasas_instance *instance) if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; + if (instance->is_ventura) { + scratch_pad_4 = + readl(&instance->reg_set->outbound_scratch_pad_4); + if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= + MR_DEFAULT_NVME_PAGE_SHIFT) + instance->nvme_page_size = + (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK)); + + dev_info(&instance->pdev->dev, + "NVME page size\t: (%d)\n", instance->nvme_page_size); + } + if (instance->msix_vectors ? megasas_setup_irqs_msix(instance, 1) : megasas_setup_irqs_ioapic(instance)) @@ -5173,13 +5337,43 @@ static int megasas_init_fw(struct megasas_instance *instance) (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); if (megasas_get_pd_list(instance) < 0) { dev_err(&instance->pdev->dev, "failed to get PD list\n"); - goto fail_get_pd_list; + goto fail_get_ld_pd_list; } memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + + /* stream detection initialization */ + if (instance->is_ventura && fusion) { + fusion->stream_detect_by_ld = + kzalloc(sizeof(struct LD_STREAM_DETECT *) + * MAX_LOGICAL_DRIVES_EXT, + GFP_KERNEL); + if (!fusion->stream_detect_by_ld) { + dev_err(&instance->pdev->dev, + "unable to allocate stream detection for pool of LDs\n"); + goto fail_get_ld_pd_list; + } + for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) { + fusion->stream_detect_by_ld[i] = + kmalloc(sizeof(struct LD_STREAM_DETECT), + GFP_KERNEL); + if (!fusion->stream_detect_by_ld[i]) { + dev_err(&instance->pdev->dev, + "unable to allocate stream detect by LD\n "); + for (j = 0; j < i; ++j) + kfree(fusion->stream_detect_by_ld[j]); + kfree(fusion->stream_detect_by_ld); + fusion->stream_detect_by_ld = NULL; + goto fail_get_ld_pd_list; + } + fusion->stream_detect_by_ld[i]->mru_bit_map + = MR_STREAM_BITMAP; + } + } + if (megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) - megasas_get_ld_list(instance); + goto fail_get_ld_pd_list; /* * Compute the max allowed sectors per IO: The controller info has two @@ -5296,7 +5490,7 @@ static int megasas_init_fw(struct megasas_instance *instance) return 0; -fail_get_pd_list: +fail_get_ld_pd_list: instance->instancet->disable_intr(instance); fail_init_adapter: megasas_destroy_irqs(instance); @@ -5309,9 +5503,11 @@ fail_ready_state: instance->ctrl_info = NULL; iounmap(instance->reg_set); - fail_ioremap: +fail_ioremap: pci_release_selected_regions(instance->pdev, 1<<instance->bar); + dev_err(&instance->pdev->dev, "Failed from %s %d\n", + __func__, __LINE__); return -EINVAL; } @@ -5531,6 +5727,98 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, return 0; } +/* megasas_get_target_prop - Send DCMD with below details to firmware. + * + * This DCMD will fetch few properties of LD/system PD defined + * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value. 
+ * + * DCMD send by drivers whenever new target is added to the OS. + * + * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP + * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD. + * 0 = system PD, 1 = LD. + * dcmd.mbox.s[1] - TargetID for LD/system PD. + * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES. + * + * @instance: Adapter soft state + * @sdev: OS provided scsi device + * + * Returns 0 on success non-zero on failure. + */ +static int +megasas_get_target_prop(struct megasas_instance *instance, + struct scsi_device *sdev) +{ + int ret; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + u16 targetId = (sdev->channel % 2) + sdev->id; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_err(&instance->pdev->dev, + "Failed to get cmd %s\n", __func__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev); + + dcmd->mbox.s[1] = cpu_to_le16(targetId); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = + cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); + dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP); + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(instance->tgt_prop_h); + dcmd->sgl.sge32[0].length = + cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES)); + + if (instance->ctrl_context && !instance->mask_interrupts) + ret = megasas_issue_blocked_cmd(instance, + cmd, MFI_IO_TIMEOUT_SECS); + else + ret = megasas_issue_polled(instance, cmd); + + switch (ret) { + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, + "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + + default: + megasas_return_cmd(instance, cmd); + } + if (ret != DCMD_SUCCESS) + dev_err(&instance->pdev->dev, + "return from %s %d return value %d\n", + __func__, __LINE__, ret); + + return ret; +} + /** * megasas_start_aen - Subscribes to AEN during driver load time * @instance: Adapter soft state @@ -5714,6 +6002,12 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->pdev = pdev; switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_VENTURA: + case PCI_DEVICE_ID_LSI_HARPOON: + case PCI_DEVICE_ID_LSI_TOMCAT: + case PCI_DEVICE_ID_LSI_VENTURA_4PORT: + case PCI_DEVICE_ID_LSI_CRUSADER_4PORT: + instance->is_ventura = true; case PCI_DEVICE_ID_LSI_FUSION: case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: @@ -5723,21 +6017,17 @@ static int megasas_probe_one(struct pci_dev *pdev, case PCI_DEVICE_ID_LSI_CUTLASS_52: case PCI_DEVICE_ID_LSI_CUTLASS_53: { - instance->ctrl_context_pages = - get_order(sizeof(struct fusion_context)); - instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL, - instance->ctrl_context_pages); - if (!instance->ctrl_context) { - dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate " - "memory for Fusion context info\n"); + if (megasas_alloc_fusion_context(instance)) { + megasas_free_fusion_context(instance); goto fail_alloc_dma_buf; } fusion = instance->ctrl_context; - memset(fusion, 0, - ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); + if ((instance->pdev->device == 
PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) fusion->adapter_type = THUNDERBOLT_SERIES; + else if (instance->is_ventura) + fusion->adapter_type = VENTURA_SERIES; else fusion->adapter_type = INVADER_SERIES; } @@ -5799,9 +6089,17 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->pd_info = pci_alloc_consistent(pdev, sizeof(struct MR_PD_INFO), &instance->pd_info_h); + instance->pd_info = pci_alloc_consistent(pdev, + sizeof(struct MR_PD_INFO), &instance->pd_info_h); + instance->tgt_prop = pci_alloc_consistent(pdev, + sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h); + if (!instance->pd_info) dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n"); + if (!instance->tgt_prop) + dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n"); + instance->crash_dump_buf = pci_alloc_consistent(pdev, CRASH_DMA_BUF_SIZE, &instance->crash_dump_h); @@ -5823,6 +6121,7 @@ static int megasas_probe_one(struct pci_dev *pdev, spin_lock_init(&instance->mfi_pool_lock); spin_lock_init(&instance->hba_lock); + spin_lock_init(&instance->stream_lock); spin_lock_init(&instance->completion_lock); mutex_init(&instance->reset_mutex); @@ -5945,6 +6244,10 @@ fail_alloc_dma_buf: pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); + if (instance->tgt_prop) + pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, + instance->tgt_prop_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -6217,6 +6520,10 @@ fail_init_mfi: pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); + if (instance->tgt_prop) + pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, + instance->tgt_prop_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -6330,6 +6637,14 @@ skip_firing_dcmds: if (instance->msix_vectors) pci_free_irq_vectors(instance->pdev); + if (instance->is_ventura) { + for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) + kfree(fusion->stream_detect_by_ld[i]); + kfree(fusion->stream_detect_by_ld); + fusion->stream_detect_by_ld = NULL; + } + + if (instance->ctrl_context) { megasas_release_fusion(instance); pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + @@ -6350,8 +6665,7 @@ skip_firing_dcmds: fusion->pd_seq_sync[i], fusion->pd_seq_phys[i]); } - free_pages((ulong)instance->ctrl_context, - instance->ctrl_context_pages); + megasas_free_fusion_context(instance); } else { megasas_release_mfi(instance); pci_free_consistent(pdev, sizeof(u32), @@ -6367,11 +6681,14 @@ skip_firing_dcmds: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); - if (instance->pd_info) pci_free_consistent(pdev, sizeof(struct MR_PD_INFO), instance->pd_info, instance->pd_info_h); + if (instance->tgt_prop) + pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES), + instance->tgt_prop, + instance->tgt_prop_h); if (instance->vf_affiliation) pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), @@ -6570,6 +6887,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, MFI_FRAME_SGL64 | MFI_FRAME_SENSE64)); + if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) { + if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) { + megasas_return_cmd(instance, cmd); + return -1; + } + } + if (cmd->frame->dcmd.opcode == 
MR_DRIVER_SET_APP_CRASHDUMP_MODE) { error = megasas_set_crash_dump_params_ioctl(cmd); megasas_return_cmd(instance, cmd); @@ -6678,7 +7002,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + ioc->sense_off); - if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), + if (copy_to_user((void __user *)((unsigned long) + get_unaligned((unsigned long *)sense_ptr)), sense, ioc->sense_len)) { dev_err(&instance->pdev->dev, "Failed to copy out to user " "sense data\n"); @@ -7047,6 +7372,13 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl, megasas_sysfs_set_dbg_lvl); +static inline void megasas_remove_scsi_device(struct scsi_device *sdev) +{ + sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n"); + scsi_remove_device(sdev); + scsi_device_put(sdev); +} + static void megasas_aen_polling(struct work_struct *work) { @@ -7151,10 +7483,8 @@ megasas_aen_polling(struct work_struct *work) else scsi_device_put(sdev1); } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } + if (sdev1) + megasas_remove_scsi_device(sdev1); } } } @@ -7171,10 +7501,8 @@ megasas_aen_polling(struct work_struct *work) else scsi_device_put(sdev1); } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); - } + if (sdev1) + megasas_remove_scsi_device(sdev1); } } } diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index f237d0003df3..62affa76133d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -77,7 +77,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " #endif #define TRUE 1 -#define SPAN_DEBUG 0 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) #define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) #define SPAN_INVALID 0xff @@ -155,12 +154,17 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) return map->raidMap.devHndlInfo[pd].curDevHdl; } +static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) +{ + return map->raidMap.devHndlInfo[pd].interfaceType; +} + u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); } -u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) +u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.ldTgtIdToLd[ldTgtId]; } @@ -179,18 +183,108 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) struct fusion_context *fusion = instance->ctrl_context; struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; struct MR_FW_RAID_MAP *pFwRaidMap = NULL; - int i; + int i, j; u16 ld_count; + struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn; + struct MR_FW_RAID_MAP_EXT *fw_map_ext; + struct MR_RAID_MAP_DESC_TABLE *desc_table; struct MR_DRV_RAID_MAP_ALL *drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap; + void *raid_map_data = NULL; + + memset(drv_map, 0, fusion->drv_map_sz); + memset(pDrvRaidMap->ldTgtIdToLd, + 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN)); + + if (instance->max_raid_mapsize) { + fw_map_dyn = fusion->ld_map[(instance->map_id & 1)]; + desc_table = + (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset)); + if 
(desc_table != fw_map_dyn->raid_map_desc_table) + dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n", + desc_table, fw_map_dyn->raid_map_desc_table); + + ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count); + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = + fw_map_dyn->fp_pd_io_timeout_sec; + pDrvRaidMap->totalSize = + cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL)); + /* point to actual data starting point*/ + raid_map_data = (void *)fw_map_dyn + + le32_to_cpu(fw_map_dyn->desc_table_offset) + + le32_to_cpu(fw_map_dyn->desc_table_size); + + for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) { + switch (le32_to_cpu(desc_table->raid_map_desc_type)) { + case RAID_MAP_DESC_TYPE_DEVHDL_INFO: + fw_map_dyn->dev_hndl_info = + (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->devHndlInfo, + fw_map_dyn->dev_hndl_info, + sizeof(struct MR_DEV_HANDLE_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + case RAID_MAP_DESC_TYPE_TGTID_INFO: + fw_map_dyn->ld_tgt_id_to_ld = + (u16 *)(raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); + for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) { + pDrvRaidMap->ldTgtIdToLd[j] = + le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]); + } + break; + case RAID_MAP_DESC_TYPE_ARRAY_INFO: + fw_map_dyn->ar_map_info = + (struct MR_ARRAY_INFO *) + (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->arMapInfo, + fw_map_dyn->ar_map_info, + sizeof(struct MR_ARRAY_INFO) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + case RAID_MAP_DESC_TYPE_SPAN_INFO: + fw_map_dyn->ld_span_map = + (struct MR_LD_SPAN_MAP *) + (raid_map_data + + le32_to_cpu(desc_table->raid_map_desc_offset)); + memcpy(pDrvRaidMap->ldSpanMap, + fw_map_dyn->ld_span_map, + sizeof(struct MR_LD_SPAN_MAP) * + le32_to_cpu(desc_table->raid_map_desc_elements)); + break; + default: + dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n", + fw_map_dyn->desc_table_num_elements); + } + ++desc_table; + } + + } else if (instance->supportmax256vd) { + fw_map_ext = + (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)]; + ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount); + if (ld_count > MAX_LOGICAL_DRIVES_EXT) { + dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n"); + return; + } + + pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); + pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec; + for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++) + pDrvRaidMap->ldTgtIdToLd[i] = + (u16)fw_map_ext->ldTgtIdToLd[i]; + memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap, + sizeof(struct MR_LD_SPAN_MAP) * ld_count); + memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo, + sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT); + memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo, + sizeof(struct MR_DEV_HANDLE_INFO) * + MAX_RAIDMAP_PHYSICAL_DEVICES); - if (instance->supportmax256vd) { - memcpy(fusion->ld_drv_map[instance->map_id & 1], - fusion->ld_map[instance->map_id & 1], - fusion->current_map_sz); /* New Raid map will not set totalSize, so keep expected value * for legacy code in ValidateMapInfo */ @@ -201,50 +295,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) fusion->ld_map[(instance->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; ld_count = 
(u16)le32_to_cpu(pFwRaidMap->ldCount); - -#if VD_EXT_DEBUG - for (i = 0; i < ld_count; i++) { - dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " - "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", - instance->unique_id, i, - fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId, - fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum, - fw_map_old->raidMap.ldSpanMap[i].ldRaid.size); - } -#endif - - memset(drv_map, 0, fusion->drv_map_sz); pDrvRaidMap->totalSize = pFwRaidMap->totalSize; pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) pDrvRaidMap->ldTgtIdToLd[i] = (u8)pFwRaidMap->ldTgtIdToLd[i]; - for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS); - i < MAX_LOGICAL_DRIVES_EXT; i++) - pDrvRaidMap->ldTgtIdToLd[i] = 0xff; for (i = 0; i < ld_count; i++) { pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, - "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " - "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x " - "size 0x%x\n", i, i, - pFwRaidMap->ldSpanMap[i].ldRaid.targetId, - pFwRaidMap->ldSpanMap[i].ldRaid.seqNum, - (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize); - dev_dbg(&instance->pdev->dev, - "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x " - "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x " - "size 0x%x\n", i, i, - pDrvRaidMap->ldSpanMap[i].ldRaid.targetId, - pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum, - (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize); - dev_dbg(&instance->pdev->dev, "Driver raid map all %p " - "raid map %p LD RAID MAP %p/%p\n", drv_map, - pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid, - &pDrvRaidMap->ldSpanMap[i].ldRaid); -#endif } memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo, sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS); @@ -265,7 +323,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) struct LD_LOAD_BALANCE_INFO *lbInfo; PLD_SPAN_INFO ldSpanInfo; struct MR_LD_RAID *raid; - u16 ldCount, num_lds; + u16 num_lds, i; u16 ld; u32 expected_size; @@ -279,7 +337,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) lbInfo = fusion->load_balance_info; ldSpanInfo = fusion->log_to_span; - if (instance->supportmax256vd) + if (instance->max_raid_mapsize) + expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL); + else if (instance->supportmax256vd) expected_size = sizeof(struct MR_FW_RAID_MAP_EXT); else expected_size = @@ -287,8 +347,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount))); if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { - dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", - (unsigned int) expected_size); + dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x", + le32_to_cpu(pDrvRaidMap->totalSize)); + dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n", + (unsigned int)expected_size); dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), le32_to_cpu(pDrvRaidMap->totalSize)); @@ -298,15 +360,23 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) if (instance->UnevenSpanSupport) mr_update_span_set(drv_map, ldSpanInfo); - mr_update_load_balance_params(drv_map, lbInfo); + if (lbInfo) + mr_update_load_balance_params(drv_map, lbInfo); num_lds = le16_to_cpu(drv_map->raidMap.ldCount); /*Convert Raid capability values to CPU arch */ 
- for (ldCount = 0; ldCount < num_lds; ldCount++) { - ld = MR_TargetIdToLdGet(ldCount, drv_map); + for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) { + ld = MR_TargetIdToLdGet(i, drv_map); + + /* For non existing VDs, iterate to next VD*/ + if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) + continue; + raid = MR_LdRaidGet(ld, drv_map); le32_to_cpus((u32 *)&raid->capability); + + num_lds--; } return 1; @@ -348,91 +418,6 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, /* ****************************************************************************** * -* Function to print info about span set created in driver from FW raid map -* -* Inputs : -* map - LD map -* ldSpanInfo - ldSpanInfo per HBA instance -*/ -#if SPAN_DEBUG -static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map, - PLD_SPAN_INFO ldSpanInfo) -{ - - u8 span; - u32 element; - struct MR_LD_RAID *raid; - LD_SPAN_SET *span_set; - struct MR_QUAD_ELEMENT *quad; - int ldCount; - u16 ld; - - for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { - ld = MR_TargetIdToLdGet(ldCount, map); - if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) - continue; - raid = MR_LdRaidGet(ld, map); - dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", - ld, raid->spanDepth); - for (span = 0; span < raid->spanDepth; span++) - dev_dbg(&instance->pdev->dev, "Span=%x," - " number of quads=%x\n", span, - le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements)); - for (element = 0; element < MAX_QUAD_DEPTH; element++) { - span_set = &(ldSpanInfo[ld].span_set[element]); - if (span_set->span_row_data_width == 0) - break; - - dev_dbg(&instance->pdev->dev, "Span Set %x:" - "width=%x, diff=%x\n", element, - (unsigned int)span_set->span_row_data_width, - (unsigned int)span_set->diff); - dev_dbg(&instance->pdev->dev, "logical LBA" - "start=0x%08lx, end=0x%08lx\n", - (long unsigned int)span_set->log_start_lba, - (long unsigned int)span_set->log_end_lba); - dev_dbg(&instance->pdev->dev, "span row start=0x%08lx," - " end=0x%08lx\n", - (long unsigned int)span_set->span_row_start, - (long unsigned int)span_set->span_row_end); - dev_dbg(&instance->pdev->dev, "data row start=0x%08lx," - " end=0x%08lx\n", - (long unsigned int)span_set->data_row_start, - (long unsigned int)span_set->data_row_end); - dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx," - " end=0x%08lx\n", - (long unsigned int)span_set->data_strip_start, - (long unsigned int)span_set->data_strip_end); - - for (span = 0; span < raid->spanDepth; span++) { - if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. - block_span_info.noElements) >= - element + 1) { - quad = &map->raidMap.ldSpanMap[ld]. - spanBlock[span].block_span_info. - quad[element]; - dev_dbg(&instance->pdev->dev, "Span=%x," - "Quad=%x, diff=%x\n", span, - element, le32_to_cpu(quad->diff)); - dev_dbg(&instance->pdev->dev, - "offset_in_span=0x%08lx\n", - (long unsigned int)le64_to_cpu(quad->offsetInSpan)); - dev_dbg(&instance->pdev->dev, - "logical start=0x%08lx, end=0x%08lx\n", - (long unsigned int)le64_to_cpu(quad->logStart), - (long unsigned int)le64_to_cpu(quad->logEnd)); - } - } - } - } - return 0; -} -#endif - -/* -****************************************************************************** -* * This routine calculates the Span block for given row using spanset. 
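* (Each LD carries per-span "span sets"; a row is located by walking the
* quad elements - diff, offsetInSpan, logStart/logEnd - that the span
* block info records for each span.)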
* * Inputs : @@ -543,19 +528,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance, else break; } -#if SPAN_DEBUG - dev_info(&instance->pdev->dev, "Strip 0x%llx," - "span_set_Strip 0x%llx, span_set_Row 0x%llx" - "data width 0x%llx span offset 0x%x\n", strip, - (unsigned long long)span_set_Strip, - (unsigned long long)span_set_Row, - (unsigned long long)span_set->span_row_data_width, - span_offset); - dev_info(&instance->pdev->dev, "For strip 0x%llx" - "row is 0x%llx\n", strip, - (unsigned long long) span_set->data_row_start + - (unsigned long long) span_set_Row + (span_offset - 1)); -#endif + retval = (span_set->data_row_start + span_set_Row + (span_offset - 1)); return retval; @@ -672,11 +645,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance, else break; } -#if SPAN_DEBUG - dev_info(&instance->pdev->dev, "get_arm_from_strip:" - "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld, - (long unsigned int)strip, (strip_offset - span_offset)); -#endif + retval = (strip_offset - span_offset); return retval; } @@ -737,16 +706,18 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); - u32 pd, arRef; + u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; u8 retval = TRUE; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; + u8 *pPdInterface = &io_info->pd_interface; u32 logArm, rowMod, armQ, arm; struct fusion_context *fusion; fusion = instance->ctrl_context; + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); /*Get row and span from io_info for Uneven Span IO.*/ row = io_info->start_row; @@ -772,27 +743,46 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); - else { - *pDevHandle = cpu_to_le16(MR_PD_INVALID); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + /* get second pd also for raid 1/10 fast path writes*/ + if (instance->is_ventura && + (raid->level == 1) && + !io_info->isRead) { + r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); + if (r1_alt_pd != MR_PD_INVALID) + io_info->r1_alt_dev_handle = + MR_PdDevHandleGet(r1_alt_pd, map); + } + } else { if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) - pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; + pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + } } } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); - pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | - physArm; - io_info->span_arm = pRAID_Context->spanArm; + if (instance->is_ventura) { + ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + } else { + pRAID_Context->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->span_arm; + } + io_info->pd_after_lb = pd; return retval; } @@ -819,16 +809,17 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, 
u32 ld, u64 stripRow, struct MR_DRV_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); - u32 pd, arRef; + u32 pd, arRef, r1_alt_pd; u8 physArm, span; u64 row; u8 retval = TRUE; u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; + u8 *pPdInterface = &io_info->pd_interface; struct fusion_context *fusion; fusion = instance->ctrl_context; - + *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); row = mega_div64_32(stripRow, raid->rowDataSize); @@ -867,31 +858,49 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, arRef = MR_LdSpanArrayGet(ld, span, map); pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { /* Get dev handle from Pd. */ *pDevHandle = MR_PdDevHandleGet(pd, map); - else { - /* set dev handle as invalid. */ - *pDevHandle = cpu_to_le16(MR_PD_INVALID); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + /* get second pd also for raid 1/10 fast path writes*/ + if (instance->is_ventura && + (raid->level == 1) && + !io_info->isRead) { + r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); + if (r1_alt_pd != MR_PD_INVALID) + io_info->r1_alt_dev_handle = + MR_PdDevHandleGet(r1_alt_pd, map); + } + } else { if ((raid->level >= 5) && ((fusion->adapter_type == THUNDERBOLT_SERIES) || ((fusion->adapter_type == INVADER_SERIES) && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) - pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; + pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. */ physArm = physArm + 1; pd = MR_ArPdGet(arRef, physArm, map); - if (pd != MR_PD_INVALID) + if (pd != MR_PD_INVALID) { /* Get dev handle from Pd */ *pDevHandle = MR_PdDevHandleGet(pd, map); + *pPdInterface = MR_PdInterfaceTypeGet(pd, map); + } } } *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); - pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | - physArm; - io_info->span_arm = pRAID_Context->spanArm; + if (instance->is_ventura) { + ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + } else { + pRAID_Context->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; + io_info->span_arm = pRAID_Context->span_arm; + } + io_info->pd_after_lb = pd; return retval; } @@ -912,7 +921,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, { struct fusion_context *fusion; struct MR_LD_RAID *raid; - u32 ld, stripSize, stripe_mask; + u32 stripSize, stripe_mask; u64 endLba, endStrip, endRow, start_row, start_strip; u64 regStart; u32 regSize; @@ -924,6 +933,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, u8 retval = 0; u8 startlba_span = SPAN_INVALID; u64 *pdBlock = &io_info->pdBlock; + u16 ld; ldStartBlock = io_info->ldStartBlock; numBlocks = io_info->numBlocks; @@ -935,6 +945,8 @@ MR_BuildRaidContext(struct megasas_instance *instance, ld = MR_TargetIdToLdGet(ldTgtId, map); raid = MR_LdRaidGet(ld, map); + /*check read ahead bit*/ + io_info->ra_capable = raid->capability.ra_capable; /* * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero @@ -996,17 +1008,6 @@ MR_BuildRaidContext(struct megasas_instance *instance, } io_info->start_span = startlba_span; io_info->start_row = start_row; -#if SPAN_DEBUG - dev_dbg(&instance->pdev->dev, "Check Span number from %s %d" - "for row 0x%llx, start strip 0x%llx end strip 0x%llx" - " span 0x%x\n", __func__, 
__LINE__, - (unsigned long long)start_row, - (unsigned long long)start_strip, - (unsigned long long)endStrip, startlba_span); - dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx" - "Start span 0x%x\n", (unsigned long long)start_row, - (unsigned long long)endRow, startlba_span); -#endif } else { start_row = mega_div64_32(start_strip, raid->rowDataSize); endRow = mega_div64_32(endStrip, raid->rowDataSize); @@ -1093,20 +1094,20 @@ MR_BuildRaidContext(struct megasas_instance *instance, regSize += stripSize; } - pRAID_Context->timeoutValue = + pRAID_Context->timeout_value = cpu_to_le16(raid->fpIoTimeoutForLd ? raid->fpIoTimeoutForLd : map->raidMap.fpPdIoTimeoutSec); if (fusion->adapter_type == INVADER_SERIES) - pRAID_Context->regLockFlags = (isRead) ? + pRAID_Context->reg_lock_flags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; - else - pRAID_Context->regLockFlags = (isRead) ? + else if (!instance->is_ventura) + pRAID_Context->reg_lock_flags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; - pRAID_Context->VirtualDiskTgtId = raid->targetId; - pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); - pRAID_Context->regLockLength = cpu_to_le32(regSize); - pRAID_Context->configSeqNum = raid->seqNum; + pRAID_Context->virtual_disk_tgt_id = raid->targetId; + pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart); + pRAID_Context->reg_lock_length = cpu_to_le32(regSize); + pRAID_Context->config_seq_num = raid->seqNum; /* save pointer to raid->LUN array */ *raidLUN = raid->LUN; @@ -1122,7 +1123,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, ref_in_start_stripe, io_info, pRAID_Context, map); /* If IO on an invalid Pd, then FP is not possible.*/ - if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID)) + if (io_info->devHandle == MR_DEVHANDLE_INVALID) io_info->fpOkForIo = FALSE; return retval; } else if (isRead) { @@ -1140,12 +1141,6 @@ MR_BuildRaidContext(struct megasas_instance *instance, return TRUE; } } - -#if SPAN_DEBUG - /* Just for testing what arm we get for strip.*/ - if (io_info->IoforUnevenSpan) - get_arm_from_strip(instance, ld, start_strip, map); -#endif return TRUE; } @@ -1259,10 +1254,6 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, break; } } -#if SPAN_DEBUG - getSpanInfo(map, ldSpanInfo); -#endif - } void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, @@ -1293,11 +1284,12 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map, } u8 megasas_get_best_arm_pd(struct megasas_instance *instance, - struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *io_info, + struct MR_DRV_RAID_MAP_ALL *drv_map) { - struct fusion_context *fusion; struct MR_LD_RAID *raid; - struct MR_DRV_RAID_MAP_ALL *drv_map; + u16 pd1_dev_handle; u16 pend0, pend1, ld; u64 diff0, diff1; u8 bestArm, pd0, pd1, span, arm; @@ -1310,9 +1302,6 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, >> RAID_CTX_SPANARM_SPAN_SHIFT); arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK); - - fusion = instance->ctrl_context; - drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map); raid = MR_LdRaidGet(ld, drv_map); span_row_size = instance->UnevenSpanSupport ? @@ -1323,47 +1312,52 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ? 
(arm + 1 - span_row_size) : arm + 1, drv_map); - /* get the pending cmds for the data and mirror arms */ - pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); - pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); + /* Get PD1 Dev Handle */ + + pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map); - /* Determine the disk whose head is nearer to the req. block */ - diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); - diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); - bestArm = (diff0 <= diff1 ? arm : arm ^ 1); + if (pd1_dev_handle == MR_DEVHANDLE_INVALID) { + bestArm = arm; + } else { + /* get the pending cmds for the data and mirror arms */ + pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]); + pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]); - if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || - (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) - bestArm ^= 1; + /* Determine the disk whose head is nearer to the req. block */ + diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]); + diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]); + bestArm = (diff0 <= diff1 ? arm : arm ^ 1); + + /* Make balance count from 16 to 4 to + * keep driver in sync with Firmware + */ + if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) || + (bestArm != arm && pend1 > pend0 + lb_pending_cmds)) + bestArm ^= 1; + + /* Update the last accessed block on the correct pd */ + io_info->span_arm = + (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; + io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1; + } - /* Update the last accessed block on the correct pd */ - io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1; lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1; - io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm; -#if SPAN_DEBUG - if (arm != bestArm) - dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance " - "occur - span 0x%x arm 0x%x bestArm 0x%x " - "io_info->span_arm 0x%x\n", - span, arm, bestArm, io_info->span_arm); -#endif return io_info->pd_after_lb; } __le16 get_updated_dev_handle(struct megasas_instance *instance, - struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) + struct LD_LOAD_BALANCE_INFO *lbInfo, + struct IO_REQUEST_INFO *io_info, + struct MR_DRV_RAID_MAP_ALL *drv_map) { u8 arm_pd; __le16 devHandle; - struct fusion_context *fusion; - struct MR_DRV_RAID_MAP_ALL *drv_map; - - fusion = instance->ctrl_context; - drv_map = fusion->ld_drv_map[(instance->map_id & 1)]; /* get best new arm (PD ID) */ - arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info); + arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map); devHandle = MR_PdDevHandleGet(arm_pd, drv_map); + io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map); atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]); + return devHandle; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 24778ba4b6e8..29650ba669da 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -47,6 +47,7 @@ #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> +#include <linux/vmalloc.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -181,32 +182,44 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { cmd->scmd = NULL; - memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); + memset(cmd->io_request, 0, 
MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + cmd->cmd_completed = false; } /** * megasas_fire_cmd_fusion - Sends command to the FW + * @instance: Adapter soft state + * @req_desc: 32bit or 64bit Request descriptor + * + * Perform PCI Write. Ventura supports 32 bit Descriptor. + * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. */ + static void megasas_fire_cmd_fusion(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { + if (instance->is_ventura) + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_single_queue_port); + else { #if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | - le32_to_cpu(req_desc->u.low)); + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); - writeq(req_data, &instance->reg_set->inbound_low_queue_port); + writeq(req_data, &instance->reg_set->inbound_low_queue_port); #else - unsigned long flags; - - spin_lock_irqsave(&instance->hba_lock, flags); - writel(le32_to_cpu(req_desc->u.low), - &instance->reg_set->inbound_low_queue_port); - writel(le32_to_cpu(req_desc->u.high), - &instance->reg_set->inbound_high_queue_port); - mmiowb(); - spin_unlock_irqrestore(&instance->hba_lock, flags); + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &instance->reg_set->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); #endif + } } /** @@ -229,7 +242,10 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c reg_set = instance->reg_set; - cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; + /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */ + if (!instance->is_ventura) + cur_max_fw_cmds = + readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF; if (dual_qdepth_disable || !cur_max_fw_cmds) cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; @@ -243,7 +259,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c if (fw_boot_context == OCR_CONTEXT) { cur_max_fw_cmds = cur_max_fw_cmds - 1; - if (cur_max_fw_cmds <= instance->max_fw_cmds) { + if (cur_max_fw_cmds < instance->max_fw_cmds) { instance->cur_can_queue = cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS); @@ -255,7 +271,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c instance->ldio_threshold = ldio_threshold; if (!instance->is_rdpq) - instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024); + instance->max_fw_cmds = + min_t(u16, instance->max_fw_cmds, 1024); if (reset_devices) instance->max_fw_cmds = min(instance->max_fw_cmds, @@ -271,7 +288,14 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c (MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS); instance->cur_can_queue = instance->max_scsi_cmds; + instance->host->can_queue = instance->cur_can_queue; } + + if (instance->is_ventura) + instance->max_mpt_cmds = + instance->max_fw_cmds * RAID_1_PEER_CMDS; + else + instance->max_mpt_cmds = instance->max_fw_cmds; } /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool @@ -285,7 +309,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) struct megasas_cmd_fusion *cmd; /* 
SG, Sense */ - for (i = 0; i < instance->max_fw_cmds; i++) { + for (i = 0; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; if (cmd) { if (cmd->sg_frame) @@ -329,7 +353,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) /* cmd_list */ - for (i = 0; i < instance->max_fw_cmds; i++) + for (i = 0; i < instance->max_mpt_cmds; i++) kfree(fusion->cmd_list[i]); kfree(fusion->cmd_list); @@ -343,7 +367,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) { int i; - u32 max_cmd; + u16 max_cmd; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; @@ -353,7 +377,8 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) fusion->sg_dma_pool = pci_pool_create("mr_sg", instance->pdev, - instance->max_chain_frame_sz, 4, 0); + instance->max_chain_frame_sz, + MR_DEFAULT_NVME_PAGE_SIZE, 0); /* SCSI_SENSE_BUFFERSIZE = 96 bytes */ fusion->sense_dma_pool = pci_pool_create("mr_sense", instance->pdev, @@ -381,33 +406,47 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) return -ENOMEM; } } + + /* create sense buffer for the raid 1/10 fp */ + for (i = max_cmd; i < instance->max_mpt_cmds; i++) { + cmd = fusion->cmd_list[i]; + cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, + GFP_KERNEL, &cmd->sense_phys_addr); + if (!cmd->sense) { + dev_err(&instance->pdev->dev, + "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + return 0; } int megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) { - u32 max_cmd, i; + u32 max_mpt_cmd, i; struct fusion_context *fusion; fusion = instance->ctrl_context; - max_cmd = instance->max_fw_cmds; + max_mpt_cmd = instance->max_mpt_cmds; /* * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. * Allocate the dynamic array first and then allocate individual * commands. */ - fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd, - GFP_KERNEL); + fusion->cmd_list = + kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd, + GFP_KERNEL); if (!fusion->cmd_list) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); return -ENOMEM; } - for (i = 0; i < max_cmd; i++) { + for (i = 0; i < max_mpt_cmd; i++) { fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), GFP_KERNEL); if (!fusion->cmd_list[i]) { @@ -539,7 +578,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance) } fusion->rdpq_virt[i].RDPQBaseAddress = - fusion->reply_frames_desc_phys[i]; + cpu_to_le64(fusion->reply_frames_desc_phys[i]); reply_desc = fusion->reply_frames_desc[i]; for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++) @@ -642,13 +681,14 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) */ /* SMID 0 is reserved. Set SMID/index from 1 */ - for (i = 0; i < instance->max_fw_cmds; i++) { + for (i = 0; i < instance->max_mpt_cmds; i++) { cmd = fusion->cmd_list[i]; offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); cmd->index = i + 1; cmd->scmd = NULL; - cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ? + cmd->sync_cmd_idx = + (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ? 
(i - instance->max_scsi_cmds) : (u32)ULONG_MAX; /* Set to Invalid */ cmd->instance = instance; @@ -658,6 +698,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; } if (megasas_create_sg_sense_fusion(instance)) @@ -725,6 +766,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) const char *sys_info; MFI_CAPABILITIES *drv_ops; u32 scratch_pad_2; + unsigned long flags; fusion = instance->ctrl_context; @@ -781,6 +823,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0; IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); IOCInitMessage->HostMSIxVectors = instance->msix_vectors; + IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT; init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); @@ -796,7 +839,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations); /* driver support Extended MSIX */ - if (fusion->adapter_type == INVADER_SERIES) + if (fusion->adapter_type >= INVADER_SERIES) drv_ops->mfi_capabilities.support_additional_msix = 1; /* driver supports HA / Remote LUN over Fast Path interface */ drv_ops->mfi_capabilities.support_fp_remote_lun = 1; @@ -813,6 +856,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) drv_ops->mfi_capabilities.support_ext_queue_depth = 1; drv_ops->mfi_capabilities.support_qd_throttling = 1; + drv_ops->mfi_capabilities.support_pd_map_target_id = 1; /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); @@ -850,7 +894,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - megasas_fire_cmd_fusion(instance, &req_desc); + /* For Ventura also IOC INIT required 64 bit Descriptor write. 
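+ * (this is why the code below bypasses megasas_fire_cmd_fusion(),
+ * which would issue a 32-bit write on Ventura, and instead writes
+ * both descriptor halves directly under hba_lock)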
*/ + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc.u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc.u.high), + &instance->reg_set->inbound_high_queue_port); + mmiowb(); + spin_unlock_irqrestore(&instance->hba_lock, flags); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); @@ -1009,11 +1060,6 @@ megasas_get_ld_map_info(struct megasas_instance *instance) memset(ci, 0, fusion->max_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); -#if VD_EXT_DEBUG - dev_dbg(&instance->pdev->dev, - "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n", - __func__, cpu_to_le32(size_map_info)); -#endif dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; @@ -1065,10 +1111,11 @@ megasas_get_map_info(struct megasas_instance *instance) int megasas_sync_map_info(struct megasas_instance *instance) { - int ret = 0, i; + int i; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; - u32 size_sync_info, num_lds; + u16 num_lds; + u32 size_sync_info; struct fusion_context *fusion; struct MR_LD_TARGET_SYNC *ci = NULL; struct MR_DRV_RAID_MAP_ALL *map; @@ -1134,7 +1181,7 @@ megasas_sync_map_info(struct megasas_instance *instance) instance->instancet->issue_dcmd(instance, cmd); - return ret; + return 0; } /* @@ -1220,7 +1267,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) { struct megasas_register_set __iomem *reg_set; struct fusion_context *fusion; - u32 max_cmd, scratch_pad_2; + u16 max_cmd; + u32 scratch_pad_2; int i = 0, count; fusion = instance->ctrl_context; @@ -1230,13 +1278,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); /* - * Reduce the max supported cmds by 1. This is to ensure that the - * reply_q_sz (1 more than the max cmd that driver may send) - * does not exceed max cmds that the FW can support - */ - instance->max_fw_cmds = instance->max_fw_cmds-1; - - /* * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames */ instance->max_mfi_cmds = @@ -1247,12 +1288,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16); fusion->request_alloc_sz = - sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; + sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds; fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *(fusion->reply_q_depth); fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + - (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * - (max_cmd + 1)); /* Extra 1 for SMID 0 */ + (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */ scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2); /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, @@ -1302,7 +1343,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->last_reply_idx[i] = 0; /* - * For fusion adapters, 3 commands for IOCTL and 5 commands + * For fusion adapters, 3 commands for IOCTL and 8 commands * for driver's internal DCMDs. 
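 * (MEGASAS_FUSION_INTERNAL_CMDS is raised from 5 to 8 by this change,
 * presumably to leave headroom for the additional internal DCMDs the
 * driver now issues)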
 	 */
 	instance->max_scsi_cmds = instance->max_fw_cmds -
@@ -1331,6 +1372,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 	}
 
 	instance->flag_ieee = 1;
+	instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT;
 	fusion->fast_path_io = 0;
 
 	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
@@ -1388,96 +1430,348 @@ fail_alloc_mfi_cmds:
  */
 void
-map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+map_cmd_status(struct fusion_context *fusion,
+	       struct scsi_cmnd *scmd, u8 status, u8 ext_status,
+	       u32 data_length, u8 *sense)
 {
+	u8 cmd_type;
+	int resid;
 
+	cmd_type = megasas_cmd_type(scmd);
 	switch (status) {
 	case MFI_STAT_OK:
-		cmd->scmd->result = DID_OK << 16;
+		scmd->result = DID_OK << 16;
 		break;
 
 	case MFI_STAT_SCSI_IO_FAILED:
 	case MFI_STAT_LD_INIT_IN_PROGRESS:
-		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+		scmd->result = (DID_ERROR << 16) | ext_status;
 		break;
 
 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
-		cmd->scmd->result = (DID_OK << 16) | ext_status;
+		scmd->result = (DID_OK << 16) | ext_status;
 		if (ext_status == SAM_STAT_CHECK_CONDITION) {
-			memset(cmd->scmd->sense_buffer, 0,
+			memset(scmd->sense_buffer, 0,
 			       SCSI_SENSE_BUFFERSIZE);
-			memcpy(cmd->scmd->sense_buffer, cmd->sense,
+			memcpy(scmd->sense_buffer, sense,
 			       SCSI_SENSE_BUFFERSIZE);
-			cmd->scmd->result |= DRIVER_SENSE << 24;
+			scmd->result |= DRIVER_SENSE << 24;
 		}
+
+		/*
+		 * If the IO request is partially completed, then MR FW will
+		 * update "io_request->DataLength" field with actual number of
+		 * bytes transferred. Driver will set residual bytes count in
+		 * SCSI command structure.
+		 */
+		resid = (scsi_bufflen(scmd) - data_length);
+		scsi_set_resid(scmd, resid);
+
+		if (resid &&
+		    ((cmd_type == READ_WRITE_LDIO) ||
+		     (cmd_type == READ_WRITE_SYSPDIO)))
+			scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
+				    " requested/completed 0x%x/0x%x\n",
+				    status, scsi_bufflen(scmd), data_length);
 		break;
 
 	case MFI_STAT_LD_OFFLINE:
 	case MFI_STAT_DEVICE_NOT_FOUND:
-		cmd->scmd->result = DID_BAD_TARGET << 16;
+		scmd->result = DID_BAD_TARGET << 16;
 		break;
 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
-		cmd->scmd->result = DID_IMM_RETRY << 16;
+		scmd->result = DID_IMM_RETRY << 16;
 		break;
 
 	default:
-		dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
-		cmd->scmd->result = DID_ERROR << 16;
+		scmd->result = DID_ERROR << 16;
 		break;
 	}
 }
 
 /**
+ * megasas_is_prp_possible -
+ * Checks if native NVMe PRPs can be built for the IO
+ *
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command from the mid-layer
+ * @sge_count:		scatter gather element count.
+ * + * Returns: true: PRPs can be built + * false: IEEE SGLs needs to be built + */ +static bool +megasas_is_prp_possible(struct megasas_instance *instance, + struct scsi_cmnd *scmd, int sge_count) +{ + struct fusion_context *fusion; + int i; + u32 data_length = 0; + struct scatterlist *sg_scmd; + bool build_prp = false; + u32 mr_nvme_pg_size; + + mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + fusion = instance->ctrl_context; + data_length = scsi_bufflen(scmd); + sg_scmd = scsi_sglist(scmd); + + /* + * NVMe uses one PRP for each page (or part of a page) + * look at the data length - if 4 pages or less then IEEE is OK + * if > 5 pages then we need to build a native SGL + * if > 4 and <= 5 pages, then check physical address of 1st SG entry + * if this first size in the page is >= the residual beyond 4 pages + * then use IEEE, otherwise use native SGL + */ + + if (data_length > (mr_nvme_pg_size * 5)) { + build_prp = true; + } else if ((data_length > (mr_nvme_pg_size * 4)) && + (data_length <= (mr_nvme_pg_size * 5))) { + /* check if 1st SG entry size is < residual beyond 4 pages */ + if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4))) + build_prp = true; + } + +/* + * Below code detects gaps/holes in IO data buffers. + * What does holes/gaps mean? + * Any SGE except first one in a SGL starts at non NVME page size + * aligned address OR Any SGE except last one in a SGL ends at + * non NVME page size boundary. + * + * Driver has already informed block layer by setting boundary rules for + * bio merging done at NVME page size boundary calling kernel API + * blk_queue_virt_boundary inside slave_config. + * Still there is possibility of IO coming with holes to driver because of + * IO merging done by IO scheduler. + * + * With SCSI BLK MQ enabled, there will be no IO with holes as there is no + * IO scheduling so no IO merging. + * + * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and + * then sending IOs with holes. + * + * Though driver can request block layer to disable IO merging by calling- + * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but + * user may tune sysfs parameter- nomerges again to 0 or 1. + * + * If in future IO scheduling is enabled with SCSI BLK MQ, + * this algorithm to detect holes will be required in driver + * for SCSI BLK MQ enabled case as well. + * + * + */ + scsi_for_each_sg(scmd, sg_scmd, sge_count, i) { + if ((i != 0) && (i != (sge_count - 1))) { + if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) || + mega_mod64(sg_dma_address(sg_scmd), + mr_nvme_pg_size)) { + build_prp = false; + atomic_inc(&instance->sge_holes_type1); + break; + } + } + + if ((sge_count > 1) && (i == 0)) { + if ((mega_mod64((sg_dma_address(sg_scmd) + + sg_dma_len(sg_scmd)), + mr_nvme_pg_size))) { + build_prp = false; + atomic_inc(&instance->sge_holes_type2); + break; + } + } + + if ((sge_count > 1) && (i == (sge_count - 1))) { + if (mega_mod64(sg_dma_address(sg_scmd), + mr_nvme_pg_size)) { + build_prp = false; + atomic_inc(&instance->sge_holes_type3); + break; + } + } + } + + return build_prp; +} + +/** + * megasas_make_prp_nvme - + * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only + * + * @instance: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sgl_ptr: SGL to be filled in + * @cmd: Fusion command frame + * @sge_count: scatter gather element count. 
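+ * (megasas_is_prp_possible() is consulted first; when it rejects the
+ * IO this function returns false and the caller falls back to IEEE
+ * SGLs via megasas_make_sgl_fusion())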
+ * + * Returns: true: PRPs are built + * false: IEEE SGLs needs to be built + */ +static bool +megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, + struct megasas_cmd_fusion *cmd, int sge_count) +{ + int sge_len, offset, num_prp_in_chain = 0; + struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl; + u64 *ptr_sgl; + dma_addr_t ptr_sgl_phys; + u64 sge_addr; + u32 page_mask, page_mask_result; + struct scatterlist *sg_scmd; + u32 first_prp_len; + bool build_prp = false; + int data_len = scsi_bufflen(scmd); + struct fusion_context *fusion; + u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, + MR_DEFAULT_NVME_PAGE_SIZE); + + fusion = instance->ctrl_context; + + build_prp = megasas_is_prp_possible(instance, scmd, sge_count); + + if (!build_prp) + return false; + + /* + * Nvme has a very convoluted prp format. One prp is required + * for each page or partial page. Driver need to split up OS sg_list + * entries if it is longer than one page or cross a page + * boundary. Driver also have to insert a PRP list pointer entry as + * the last entry in each physical page of the PRP list. + * + * NOTE: The first PRP "entry" is actually placed in the first + * SGL entry in the main message as IEEE 64 format. The 2nd + * entry in the main message is the chain element, and the rest + * of the PRP entries are built in the contiguous pcie buffer. + */ + page_mask = mr_nvme_pg_size - 1; + ptr_sgl = (u64 *)cmd->sg_frame; + ptr_sgl_phys = cmd->sg_frame_phys_addr; + memset(ptr_sgl, 0, instance->max_chain_frame_sz); + + /* Build chain frame element which holds all prps except first*/ + main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *) + ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64)); + + main_chain_element->Address = cpu_to_le64(ptr_sgl_phys); + main_chain_element->NextChainOffset = 0; + main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | + IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; + + /* Build first prp, sge need not to be page aligned*/ + ptr_first_sgl = sgl_ptr; + sg_scmd = scsi_sglist(scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + + offset = (u32)(sge_addr & page_mask); + first_prp_len = mr_nvme_pg_size - offset; + + ptr_first_sgl->Address = cpu_to_le64(sge_addr); + ptr_first_sgl->Length = cpu_to_le32(first_prp_len); + + data_len -= first_prp_len; + + if (sge_len > first_prp_len) { + sge_addr += first_prp_len; + sge_len -= first_prp_len; + } else if (sge_len == first_prp_len) { + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + for (;;) { + offset = (u32)(sge_addr & page_mask); + + /* Put PRP pointer due to page boundary*/ + page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; + if (unlikely(!page_mask_result)) { + scmd_printk(KERN_NOTICE, + scmd, "page boundary ptr_sgl: 0x%p\n", + ptr_sgl); + ptr_sgl_phys += 8; + *ptr_sgl = cpu_to_le64(ptr_sgl_phys); + ptr_sgl++; + num_prp_in_chain++; + } + + *ptr_sgl = cpu_to_le64(sge_addr); + ptr_sgl++; + ptr_sgl_phys += 8; + num_prp_in_chain++; + + sge_addr += mr_nvme_pg_size; + sge_len -= mr_nvme_pg_size; + data_len -= mr_nvme_pg_size; + + if (data_len <= 0) + break; + + if (sge_len > 0) + continue; + + sg_scmd = sg_next(sg_scmd); + sge_addr = sg_dma_address(sg_scmd); + sge_len = sg_dma_len(sg_scmd); + } + + main_chain_element->Length = + cpu_to_le32(num_prp_in_chain * sizeof(u64)); + + atomic_inc(&instance->prp_sgl); + return build_prp; +} + 
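/* A minimal standalone sketch (not driver code) of the PRP sizing rule
 * implemented above: the first PRP entry covers only the remainder of
 * the first NVMe page, every later entry covers one full page, and all
 * entries after the first live in the chain buffer (cmd->sg_frame).
 * The helper name below is illustrative only.
 */
static inline unsigned int prp_entries_for_io(unsigned long long addr,
					      unsigned int len,
					      unsigned int pg_size)
{
	unsigned int first_prp_len = pg_size - (addr & (pg_size - 1));

	if (len <= first_prp_len)
		return 1;
	/* one partial first page plus a page-granular remainder, rounded up */
	return 1 + (len - first_prp_len + pg_size - 1) / pg_size;
}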
+/**
+ * megasas_make_sgl_fusion -	Prepares 32-bit SGL
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @sgl_ptr:		SGL to be filled in
 * @cmd:		cmd we are working on
+ * @sge_count:		sge count
 *
- * If successful, this function returns the number of SG elements.
 */
-static int
+static void
 megasas_make_sgl_fusion(struct megasas_instance *instance,
			struct scsi_cmnd *scp,
			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
-			struct megasas_cmd_fusion *cmd)
+			struct megasas_cmd_fusion *cmd, int sge_count)
 {
-	int i, sg_processed, sge_count;
+	int i, sg_processed;
 	struct scatterlist *os_sgl;
 	struct fusion_context *fusion;
 
 	fusion = instance->ctrl_context;
 
-	if (fusion->adapter_type == INVADER_SERIES) {
+	if (fusion->adapter_type >= INVADER_SERIES) {
 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
 		sgl_ptr_end->Flags = 0;
 	}
 
-	sge_count = scsi_dma_map(scp);
-
-	BUG_ON(sge_count < 0);
-
-	if (sge_count > instance->max_num_sge || !sge_count)
-		return sge_count;
-
 	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
 		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
 		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
 		sgl_ptr->Flags = 0;
-		if (fusion->adapter_type == INVADER_SERIES)
+		if (fusion->adapter_type >= INVADER_SERIES)
			if (i == sge_count - 1)
				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
		sgl_ptr++;
-
		sg_processed = i + 1;

		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
		    (sge_count > fusion->max_sge_in_main_msg)) {

			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
-			if (fusion->adapter_type == INVADER_SERIES) {
+			if (fusion->adapter_type >= INVADER_SERIES) {
				if ((le16_to_cpu(cmd->io_request->IoFlags) &
					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1493,7 +1787,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
			sg_chain = sgl_ptr;
			/* Prepare chain element */
			sg_chain->NextChainOffset = 0;
-			if (fusion->adapter_type == INVADER_SERIES)
+			if (fusion->adapter_type >= INVADER_SERIES)
				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
			else
				sg_chain->Flags =
@@ -1507,6 +1801,45 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
		}
	}
+	atomic_inc(&instance->ieee_sgl);
+}
+
+/**
+ * megasas_make_sgl -	Build Scatter Gather List (SGLs)
+ * @scp:		SCSI command pointer
+ * @instance:		Soft instance of controller
+ * @cmd:		Fusion command pointer
+ *
+ * This function will build SGLs based on device type.
+ * For NVMe drives, there is a different way of building SGLs - the NVMe
+ * native format, PRPs (Physical Region Page).
+ *
+ * Returns the number of sg lists actually used, zero if the sg list
+ * is NULL, or -ENOMEM if the mapping failed
+ */
+static
+int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		     struct megasas_cmd_fusion *cmd)
+{
+	int sge_count;
+	bool build_prp = false;
+	struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
+
+	sge_count = scsi_dma_map(scp);
+
+	if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
+		return sge_count;
+
+	sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
+	if ((le16_to_cpu(cmd->io_request->IoFlags) &
+	     MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
+	    (cmd->pd_interface == NVME_PD))
+		build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
+						  cmd, sge_count);
+
+	if (!build_prp)
+		megasas_make_sgl_fusion(instance, scp, sgl_chain64,
+					cmd, sge_count);
 
 	return sge_count;
 }
 
@@ -1525,7 +1858,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
 {
 	struct MR_LD_RAID *raid;
-	u32 ld;
+	u16 ld;
 	u64 start_blk = io_info->pdBlock;
 	u8 *cdb = io_request->CDB.CDB32;
 	u32 num_blocks = io_info->numBlocks;
@@ -1574,6 +1907,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 			MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
 			MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
 			MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+			MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
 			MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
 	} else {
 		io_request->EEDPFlags = cpu_to_le16(
@@ -1688,6 +2022,166 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 }
 
 /**
+ * megasas_stream_detect -	stream detection on read and write IOs
+ * @instance:		Adapter soft state
+ * @cmd:		Command to be prepared
+ * @io_info:		IO Request info
+ *
+ */
+
+/** stream detection on read and write IOs */
+static void megasas_stream_detect(struct megasas_instance *instance,
+				  struct megasas_cmd_fusion *cmd,
+				  struct IO_REQUEST_INFO *io_info)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	u32 device_id = io_info->ldTgtId;
+	struct LD_STREAM_DETECT *current_ld_sd
+		= fusion->stream_detect_by_ld[device_id];
+	u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
+	u32 shifted_values, unshifted_values;
+	u32 index_value_mask, shifted_values_mask;
+	int i;
+	bool is_read_ahead = false;
+	struct STREAM_DETECT *current_sd;
+	/* find possible stream */
+	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
+		stream_num = (*track_stream >>
+			(i * BITS_PER_INDEX_STREAM)) &
+			STREAM_MASK;
+		current_sd = &current_ld_sd->stream_track[stream_num];
+		/* if we found a stream, update the raid
+		 * context and also update the mruBitMap
+		 */
+		/* boundary condition */
+		if ((current_sd->next_seq_lba) &&
+		    (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
+		    (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
+		    (current_sd->is_read == io_info->isRead)) {
+
+			if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
+			    ((!io_info->isRead) || (!is_read_ahead)))
+				/*
+				 * Once the API is available we need to change this.
+				 * At this point we are not allowing any gap
+				 */
+				continue;
+
+			SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
+			current_sd->next_seq_lba =
+				io_info->ldStartBlock + io_info->numBlocks;
+			/*
+			 * update the mruBitMap LRU
+			 */
+			shifted_values_mask =
+				(1 << i * BITS_PER_INDEX_STREAM) - 1;
+			shifted_values = ((*track_stream & shifted_values_mask)
+						<< BITS_PER_INDEX_STREAM);
+			index_value_mask =
+				STREAM_MASK << i * BITS_PER_INDEX_STREAM;
+			unshifted_values =
+				*track_stream & ~(shifted_values_mask |
+				index_value_mask);
+			*track_stream =
+				unshifted_values | shifted_values | stream_num;
+			return;
+		}
+	}
+	/*
+	 * if we did not find any stream, create a new one
+	 * from the least recently used
+	 */
+	stream_num = (*track_stream >>
+		((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
+		STREAM_MASK;
+	current_sd = &current_ld_sd->stream_track[stream_num];
+	current_sd->is_read = io_info->isRead;
+	current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
+	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
+	return;
+}
+
+/**
+ * megasas_set_raidflag_cpu_affinity - This function sets the cpu
+ * affinity (cpu of the controller) and raid_flags in the raid context
+ * based on IO type.
+ *
+ * @praid_context:	IO RAID context
+ * @raid:		LD raid map
+ * @fp_possible:	Is fast path possible?
+ * @is_read:		Is read IO?
+ *
+ */
+static void
+megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
+				  struct MR_LD_RAID *raid, bool fp_possible,
+				  u8 is_read, u32 scsi_buff_len)
+{
+	u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
+	struct RAID_CONTEXT_G35 *rctx_g35;
+
+	rctx_g35 = &praid_context->raid_context_g35;
+	if (fp_possible) {
+		if (is_read) {
+			if ((raid->cpuAffinity.pdRead.cpu0) &&
+			    (raid->cpuAffinity.pdRead.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.pdRead.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+		} else {
+			if ((raid->cpuAffinity.pdWrite.cpu0) &&
+			    (raid->cpuAffinity.pdWrite.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.pdWrite.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+			/* Fast path cache bypass capable R0/R1 VD */
+			if ((raid->level <= 1) &&
+			    (raid->capability.fp_cache_bypass_capable)) {
+				rctx_g35->routing_flags |=
+					(1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
+				rctx_g35->raid_flags =
+					(MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
+					<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+			}
+		}
+	} else {
+		if (is_read) {
+			if ((raid->cpuAffinity.ldRead.cpu0) &&
+			    (raid->cpuAffinity.ldRead.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.ldRead.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+		} else {
+			if ((raid->cpuAffinity.ldWrite.cpu0) &&
+			    (raid->cpuAffinity.ldWrite.cpu1))
+				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+			else if (raid->cpuAffinity.ldWrite.cpu1)
+				cpu_sel = MR_RAID_CTX_CPUSEL_1;
+
+			if (is_stream_detected(rctx_g35) &&
+			    (raid->level == 5) &&
+			    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
+			    (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
+				cpu_sel = MR_RAID_CTX_CPUSEL_0;
+		}
+	}
+
+	rctx_g35->routing_flags |=
+		(cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+
+	/* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+	 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
+	 * IO Subtype is not a bitmap.
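+	 * (raid_flags holds a single enumerated subtype rather than a
+	 * bitmap, so the last value written wins: the check below
+	 * overwrites CACHE_BYPASS with LDIO_BW_LIMIT for large RAID-1
+	 * writes)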
+ */ + if ((raid->level == 1) && (!is_read)) { + if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) + praid_context->raid_context_g35.raid_flags = + (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); + } +} + +/** * megasas_build_ldio_fusion - Prepares IOs to devices * @instance: Adapter soft state * @scp: SCSI command @@ -1701,29 +2195,36 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { - u8 fp_possible; + bool fp_possible; + u16 ld; u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; + u32 scsi_buff_len; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct IO_REQUEST_INFO io_info; struct fusion_context *fusion; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; u8 *raidLUN; + unsigned long spinlock_flags; + union RAID_CONTEXT_UNION *praid_context; + struct MR_LD_RAID *raid = NULL; + struct MR_PRIV_DEVICE *mrdev_priv; device_id = MEGASAS_DEV_INDEX(scp); fusion = instance->ctrl_context; io_request = cmd->io_request; - io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); - io_request->RaidContext.status = 0; - io_request->RaidContext.exStatus = 0; + io_request->RaidContext.raid_context.virtual_disk_tgt_id = + cpu_to_le16(device_id); + io_request->RaidContext.raid_context.status = 0; + io_request->RaidContext.raid_context.ex_status = 0; req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; start_lba_lo = 0; start_lba_hi = 0; - fp_possible = 0; + fp_possible = false; /* * 6-byte READ(0x08) or WRITE(0x0A) cdb @@ -1779,22 +2280,27 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; - io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); + io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + scsi_buff_len = scsi_bufflen(scp); + io_request->DataLength = cpu_to_le32(scsi_buff_len); if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) io_info.isRead = 1; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= - instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { - io_request->RaidContext.regLockFlags = 0; - fp_possible = 0; + if (ld < instance->fw_supported_vd_count) + raid = MR_LdRaidGet(ld, local_map_ptr); + + if (!raid || (!fusion->fast_path_io)) { + io_request->RaidContext.raid_context.reg_lock_flags = 0; + fp_possible = false; } else { if (MR_BuildRaidContext(instance, &io_info, - &io_request->RaidContext, + &io_request->RaidContext.raid_context, local_map_ptr, &raidLUN)) - fp_possible = io_info.fpOkForIo; + fp_possible = (io_info.fpOkForIo > 0) ? true : false; } /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU @@ -1803,6 +2309,54 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 
raw_smp_processor_id() % instance->msix_vectors : 0; + praid_context = &io_request->RaidContext; + + if (instance->is_ventura) { + spin_lock_irqsave(&instance->stream_lock, spinlock_flags); + megasas_stream_detect(instance, cmd, &io_info); + spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags); + /* In ventura if stream detected for a read and it is read ahead + * capable make this IO as LDIO + */ + if (is_stream_detected(&io_request->RaidContext.raid_context_g35) && + io_info.isRead && io_info.ra_capable) + fp_possible = false; + + /* FP for Optimal raid level 1. + * All large RAID-1 writes (> 32 KiB, both WT and WB modes) + * are built by the driver as LD I/Os. + * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os + * (there is never a reason to process these as buffered writes) + * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os + * with the SLD bit asserted. + */ + if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { + mrdev_priv = scp->device->hostdata; + + if (atomic_inc_return(&instance->fw_outstanding) > + (instance->host->can_queue)) { + fp_possible = false; + atomic_dec(&instance->fw_outstanding); + } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || + atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { + fp_possible = false; + atomic_dec(&instance->fw_outstanding); + if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) + atomic_set(&mrdev_priv->r1_ldio_hint, + instance->r1_ldio_hint_default); + } + } + + /* If raid is NULL, set CPU affinity to default CPU0 */ + if (raid) + megasas_set_raidflag_cpu_affinity(praid_context, + raid, fp_possible, io_info.isRead, + scsi_buff_len); + else + praid_context->raid_context_g35.routing_flags |= + (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT); + } + if (fp_possible) { megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, local_map_ptr, start_lba_lo); @@ -1811,29 +2365,52 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { - if (io_request->RaidContext.regLockFlags == + if (io_request->RaidContext.raid_context.reg_lock_flags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.Type = MPI2_TYPE_CUDA; - io_request->RaidContext.nseg = 0x1; + io_request->RaidContext.raid_context.type + = MPI2_TYPE_CUDA; + io_request->RaidContext.raid_context.nseg = 0x1; io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); - io_request->RaidContext.regLockFlags |= + io_request->RaidContext.raid_context.reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); + } else if (instance->is_ventura) { + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->IoFlags |= + cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); } - if ((fusion->load_balance_info[device_id].loadBalanceFlag) && - (io_info.isRead)) { + if (fusion->load_balance_info && + (fusion->load_balance_info[device_id].loadBalanceFlag) && + (io_info.isRead)) { io_info.devHandle = get_updated_dev_handle(instance, &fusion->load_balance_info[device_id], - &io_info); + &io_info, local_map_ptr); scp->SCp.Status |= 
MEGASAS_LOAD_BALANCE_FLAG; cmd->pd_r1_lb = io_info.pd_after_lb; + if (instance->is_ventura) + io_request->RaidContext.raid_context_g35.span_arm + = io_info.span_arm; + else + io_request->RaidContext.raid_context.span_arm + = io_info.span_arm; + } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; + if (instance->is_ventura) + cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; + else + cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; + if ((raidLUN[0] == 1) && (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) { instance->dev_handle = !(instance->dev_handle); @@ -1843,28 +2420,39 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; + cmd->pd_interface = io_info.pd_interface; /* populate the LUN field */ memcpy(io_request->LUN, raidLUN, 8); } else { - io_request->RaidContext.timeoutValue = + io_request->RaidContext.raid_context.timeout_value = cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); if (fusion->adapter_type == INVADER_SERIES) { if (io_info.do_fp_rlbypass || - (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)) + (io_request->RaidContext.raid_context.reg_lock_flags + == REGION_TYPE_UNUSED)) cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.Type = MPI2_TYPE_CUDA; - io_request->RaidContext.regLockFlags |= + io_request->RaidContext.raid_context.type + = MPI2_TYPE_CUDA; + io_request->RaidContext.raid_context.reg_lock_flags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE); - io_request->RaidContext.nseg = 0x1; + io_request->RaidContext.raid_context.nseg = 0x1; + } else if (instance->is_ventura) { + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); + } /* Not FP */ } @@ -1881,27 +2469,26 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; - u16 pd_index = 0; + u16 ld; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; u8 span, physArm; __le16 devHandle; - u32 ld, arRef, pd; + u32 arRef, pd; struct MR_LD_RAID *raid; struct RAID_CONTEXT *pRAID_Context; u8 fp_possible = 1; io_request = cmd->io_request; device_id = MEGASAS_DEV_INDEX(scmd); - pd_index = MEGASAS_PD_INDEX(scmd); local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); /* get RAID_Context pointer */ - pRAID_Context = &io_request->RaidContext; + pRAID_Context = &io_request->RaidContext.raid_context; /* Check with FW team */ - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->regLockRowLBA = 0; - pRAID_Context->regLockLength = 0; + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->reg_lock_row_lba = 0; + pRAID_Context->reg_lock_length = 0; if (fusion->fast_path_io && ( device_id < instance->fw_supported_vd_count)) { @@ -1909,10 +2496,11 @@ static void 
megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, ld = MR_TargetIdToLdGet(device_id, local_map_ptr); if (ld >= instance->fw_supported_vd_count) fp_possible = 0; - - raid = MR_LdRaidGet(ld, local_map_ptr); - if (!(raid->capability.fpNonRWCapable)) - fp_possible = 0; + else { + raid = MR_LdRaidGet(ld, local_map_ptr); + if (!(raid->capability.fpNonRWCapable)) + fp_possible = 0; + } } else fp_possible = 0; @@ -1920,7 +2508,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; io_request->DevHandle = cpu_to_le16(device_id); io_request->LUN[1] = scmd->device->lun; - pRAID_Context->timeoutValue = + pRAID_Context->timeout_value = cpu_to_le16 (scmd->request->timeout / HZ); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << @@ -1928,9 +2516,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, } else { /* set RAID context values */ - pRAID_Context->configSeqNum = raid->seqNum; - pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; - pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); + pRAID_Context->config_seq_num = raid->seqNum; + if (!instance->is_ventura) + pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ; + pRAID_Context->timeout_value = + cpu_to_le16(raid->fpIoTimeoutForLd); /* get the DevHandle for the PD (since this is fpNonRWCapable, this is a single disk RAID0) */ @@ -1965,7 +2555,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, */ static void megasas_build_syspd_fusion(struct megasas_instance *instance, - struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible) + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, + bool fp_possible) { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; @@ -1975,22 +2566,25 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct RAID_CONTEXT *pRAID_Context; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; + struct MR_PRIV_DEVICE *mr_device_priv_data; struct fusion_context *fusion = instance->ctrl_context; pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1]; device_id = MEGASAS_DEV_INDEX(scmd); pd_index = MEGASAS_PD_INDEX(scmd); os_timeout_value = scmd->request->timeout / HZ; + mr_device_priv_data = scmd->device->hostdata; + cmd->pd_interface = mr_device_priv_data->interface_type; io_request = cmd->io_request; /* get RAID_Context pointer */ - pRAID_Context = &io_request->RaidContext; - pRAID_Context->regLockFlags = 0; - pRAID_Context->regLockRowLBA = 0; - pRAID_Context->regLockLength = 0; + pRAID_Context = &io_request->RaidContext.raid_context; + pRAID_Context->reg_lock_flags = 0; + pRAID_Context->reg_lock_row_lba = 0; + pRAID_Context->reg_lock_length = 0; io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); io_request->LUN[1] = scmd->device->lun; - pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD + pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; /* If FW supports PD sequence number */ @@ -1999,24 +2593,38 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, /* TgtId must be incremented by 255 as jbod seq number is index * below raid map */ - pRAID_Context->VirtualDiskTgtId = - cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); - pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum; + /* More than 256 PD/JBOD support for Ventura */ + if 
(instance->support_morethan256jbod) + pRAID_Context->virtual_disk_tgt_id = + pd_sync->seq[pd_index].pd_target_id; + else + pRAID_Context->virtual_disk_tgt_id = + cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1)); + pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum; io_request->DevHandle = pd_sync->seq[pd_index].devHandle; - pRAID_Context->regLockFlags |= - (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); - pRAID_Context->Type = MPI2_TYPE_CUDA; - pRAID_Context->nseg = 0x1; + if (instance->is_ventura) { + io_request->RaidContext.raid_context_g35.routing_flags |= + (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (1 << RAID_CONTEXT_NSEG_SHIFT); + io_request->RaidContext.raid_context_g35.nseg_type |= + (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT); + } else { + pRAID_Context->type = MPI2_TYPE_CUDA; + pRAID_Context->nseg = 0x1; + pRAID_Context->reg_lock_flags |= + (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA); + } } else if (fusion->fast_path_io) { - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->configSeqNum = 0; + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->config_seq_num = 0; local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; io_request->DevHandle = local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; } else { /* Want to send all IO via FW path */ - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->configSeqNum = 0; + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + pRAID_Context->config_seq_num = 0; io_request->DevHandle = cpu_to_le16(0xFFFF); } @@ -2032,17 +2640,17 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); } else { /* system pd Fast Path */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; timeout_limit = (scmd->device->type == TYPE_DISK) ? 255 : 0xFFFF; - pRAID_Context->timeoutValue = + pRAID_Context->timeout_value = cpu_to_le16((os_timeout_value > timeout_limit) ? 
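On Ventura the legacy one-byte type:4/nseg:4 RAID-context field becomes the 16-bit nseg_type word (nseg in bits 7:4, CUDA type in bits 3:0), and the old MR_RL_FLAGS_SEQ_NUM_ENABLE region-lock bit moves to bit 4 of routing_flags; the repeated nseg_type/routing_flags ORs in these hunks are building exactly that. A compilable sketch of the packing (MPI2_TYPE_CUDA is 0x2 in the full header):

#include <assert.h>
#include <stdint.h>

#define RAID_CONTEXT_NSEG_SHIFT            4
#define RAID_CONTEXT_TYPE_SHIFT            0
#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
#define MPI2_TYPE_CUDA                     0x2

static uint16_t ventura_nseg_type(uint8_t nseg, uint8_t type)
{
        return (uint16_t)((nseg << RAID_CONTEXT_NSEG_SHIFT) |
                          (type << RAID_CONTEXT_TYPE_SHIFT));
}

int main(void)
{
        /* what the Ventura branches above end up writing */
        uint16_t nseg_type = ventura_nseg_type(1, MPI2_TYPE_CUDA);
        uint16_t routing = (uint16_t)(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);

        assert(nseg_type == 0x12 && routing == 0x10);
        return 0;
}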
timeout_limit : os_timeout_value); - if (fusion->adapter_type == INVADER_SERIES) + if (fusion->adapter_type >= INVADER_SERIES) io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); @@ -2066,9 +2674,11 @@ megasas_build_io_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { - u16 sge_count; + int sge_count; u8 cmd_type; struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; + struct MR_PRIV_DEVICE *mr_device_priv_data; + mr_device_priv_data = scp->device->hostdata; /* Zero out some fields so they don't get reused */ memset(io_request->LUN, 0x0, 8); @@ -2078,9 +2688,9 @@ megasas_build_io_fusion(struct megasas_instance *instance, io_request->Control = 0; io_request->EEDPBlockSize = 0; io_request->ChainOffset = 0; - io_request->RaidContext.RAIDFlags = 0; - io_request->RaidContext.Type = 0; - io_request->RaidContext.nseg = 0; + io_request->RaidContext.raid_context.raid_flags = 0; + io_request->RaidContext.raid_context.type = 0; + io_request->RaidContext.raid_context.nseg = 0; memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); /* @@ -2097,12 +2707,14 @@ megasas_build_io_fusion(struct megasas_instance *instance, megasas_build_ld_nonrw_fusion(instance, scp, cmd); break; case READ_WRITE_SYSPDIO: + megasas_build_syspd_fusion(instance, scp, cmd, true); + break; case NON_READ_WRITE_SYSPDIO: - if (instance->secure_jbod_support && - (cmd_type == NON_READ_WRITE_SYSPDIO)) - megasas_build_syspd_fusion(instance, scp, cmd, 0); + if (instance->secure_jbod_support || + mr_device_priv_data->is_tm_capable) + megasas_build_syspd_fusion(instance, scp, cmd, false); else - megasas_build_syspd_fusion(instance, scp, cmd, 1); + megasas_build_syspd_fusion(instance, scp, cmd, true); break; default: break; @@ -2112,23 +2724,27 @@ megasas_build_io_fusion(struct megasas_instance *instance, * Construct SGL */ - sge_count = - megasas_make_sgl_fusion(instance, scp, - (struct MPI25_IEEE_SGE_CHAIN64 *) - &io_request->SGL, cmd); + sge_count = megasas_make_sgl(instance, scp, cmd); - if (sge_count > instance->max_num_sge) { - dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds " - "max (0x%x) allowed\n", sge_count, - instance->max_num_sge); + if (sge_count > instance->max_num_sge || (sge_count < 0)) { + dev_err(&instance->pdev->dev, + "%s %d sge_count (%d) is out of range. Range is: 0-%d\n", + __func__, __LINE__, sge_count, instance->max_num_sge); return 1; } - /* numSGE store lower 8 bit of sge_count. - * numSGEExt store higher 8 bit of sge_count - */ - io_request->RaidContext.numSGE = sge_count; - io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8); + if (instance->is_ventura) { + set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count); + cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags); + cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type); + } else { + /* numSGE store lower 8 bit of sge_count. 
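The SGE count is 12 bits wide on both generations; only the packing differs. Pre-Ventura splits it across num_sge (low 8 bits) and num_sge_ext (bits 11:8), while Ventura's set_num_sge()/get_num_sge() helpers keep it in the low 12 bits of a u16 whose top bit doubles as the stream-detected flag. A round-trip check of the split encoding, with the NUM_SGE_* constants as defined later in megaraid_sas_fusion.h:

#include <assert.h>
#include <stdint.h>

#define NUM_SGE_MASK_LOWER  0xFF
#define NUM_SGE_MASK_UPPER  0x0F
#define NUM_SGE_SHIFT_UPPER 8

static void split_num_sge(uint16_t sge_count, uint8_t *num_sge,
                          uint8_t *num_sge_ext)
{
        *num_sge = (uint8_t)(sge_count & NUM_SGE_MASK_LOWER);
        *num_sge_ext = (uint8_t)((sge_count >> NUM_SGE_SHIFT_UPPER) &
                                 NUM_SGE_MASK_UPPER);
}

static uint16_t join_num_sge(uint8_t num_sge, uint8_t num_sge_ext)
{
        return (uint16_t)(((num_sge_ext & NUM_SGE_MASK_UPPER) <<
                           NUM_SGE_SHIFT_UPPER) | num_sge);
}

int main(void)
{
        uint8_t lo, ext;

        split_num_sge(0x234, &lo, &ext);        /* 564 SGEs */
        assert(lo == 0x34 && ext == 0x02);
        assert(join_num_sge(lo, ext) == 0x234);
        return 0;
}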
+ * numSGEExt store higher 8 bit of sge_count + */ + io_request->RaidContext.raid_context.num_sge = sge_count; + io_request->RaidContext.raid_context.num_sge_ext = + (u8)(sge_count >> 8); + } io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); @@ -2149,25 +2765,61 @@ megasas_build_io_fusion(struct megasas_instance *instance, return 0; } -union MEGASAS_REQUEST_DESCRIPTOR_UNION * +static union MEGASAS_REQUEST_DESCRIPTOR_UNION * megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) { u8 *p; struct fusion_context *fusion; - if (index >= instance->max_fw_cmds) { - dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for " - "descriptor for scsi%d\n", index, - instance->host->host_no); - return NULL; - } fusion = instance->ctrl_context; - p = fusion->req_frames_desc - +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index; + p = fusion->req_frames_desc + + sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index; return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; } + +/* megasas_prepare_secondRaid1_IO + * It prepares the raid 1 second IO + */ +void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd, + struct megasas_cmd_fusion *r1_cmd) +{ + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; + struct fusion_context *fusion; + fusion = instance->ctrl_context; + req_desc = cmd->request_desc; + /* copy the io request frame as well as 8 SGEs data for r1 command */ + memcpy(r1_cmd->io_request, cmd->io_request, + (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST))); + memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, + (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION))); + /* sense buffer is different for r1 command */ + r1_cmd->io_request->SenseBufferLowAddress = + cpu_to_le32(r1_cmd->sense_phys_addr); + r1_cmd->scmd = cmd->scmd; + req_desc2 = megasas_get_request_descriptor(instance, + (r1_cmd->index - 1)); + req_desc2->Words = 0; + r1_cmd->request_desc = req_desc2; + req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index); + req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags; + r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle; + r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; + r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle; + cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = + cpu_to_le16(r1_cmd->index); + r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid = + cpu_to_le16(cmd->index); + /* MSIxIndex of both commands request descriptors should be same */ + r1_cmd->request_desc->SCSIIO.MSIxIndex = + cmd->request_desc->SCSIIO.MSIxIndex; + /* span arm is different for r1 cmd */ + r1_cmd->io_request->RaidContext.raid_context_g35.span_arm = + cmd->io_request->RaidContext.raid_context_g35.span_arm + 1; +} + /** * megasas_build_and_issue_cmd_fusion - Main routine for building and * issuing non IOCTL cmd @@ -2178,7 +2830,7 @@ static u32 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, struct scsi_cmnd *scmd) { - struct megasas_cmd_fusion *cmd; + struct megasas_cmd_fusion *cmd, *r1_cmd = NULL; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; u32 index; struct fusion_context *fusion; @@ -2193,13 +2845,22 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, return SCSI_MLQUEUE_DEVICE_BUSY; } + if (atomic_inc_return(&instance->fw_outstanding) > + instance->host->can_queue) { + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + cmd = megasas_get_cmd_fusion(instance,
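megasas_prepare_secondRaid1_IO() clones the built fast-path frame for the second mirror arm and cross-links the two commands through peer_smid, so whichever half completes second can find its partner in fusion->cmd_list; the r1_cmd itself comes out of the extra command slots above max_fw_cmds (fetched below with scmd->request->tag + instance->max_fw_cmds). A minimal model of the cross-linking invariant, with 1-based SMIDs as in the driver (struct toy_cmd is illustrative only):

#include <assert.h>
#include <stdint.h>

struct toy_cmd {
        uint16_t index;         /* 1-based SMID */
        uint16_t peer_smid;     /* SMID of the other arm's command */
};

static void link_r1_pair(struct toy_cmd *cmd, struct toy_cmd *r1_cmd)
{
        cmd->peer_smid = r1_cmd->index;
        r1_cmd->peer_smid = cmd->index;
}

int main(void)
{
        struct toy_cmd cmd_list[8] = {
                {1, 0}, {2, 0}, {3, 0}, {4, 0},
                {5, 0}, {6, 0}, {7, 0}, {8, 0}
        };
        struct toy_cmd *cmd = &cmd_list[2], *r1_cmd = &cmd_list[6];

        link_r1_pair(cmd, r1_cmd);
        /* completion path: cmd_list[peer_smid - 1] is always the partner */
        assert(&cmd_list[cmd->peer_smid - 1] == r1_cmd);
        assert(&cmd_list[r1_cmd->peer_smid - 1] == cmd);
        return 0;
}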
scmd->request->tag); + if (!cmd) { + atomic_dec(&instance->fw_outstanding); + return SCSI_MLQUEUE_HOST_BUSY; + } + index = cmd->index; req_desc = megasas_get_request_descriptor(instance, index-1); - if (!req_desc) - return SCSI_MLQUEUE_HOST_BUSY; req_desc->Words = 0; cmd->request_desc = req_desc; @@ -2208,6 +2869,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, megasas_return_cmd_fusion(instance, cmd); dev_err(&instance->pdev->dev, "Error building command\n"); cmd->request_desc = NULL; + atomic_dec(&instance->fw_outstanding); return SCSI_MLQUEUE_HOST_BUSY; } @@ -2218,18 +2880,92 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, cmd->io_request->ChainOffset != 0xF) dev_err(&instance->pdev->dev, "The chain offset value is not " "correct : %x\n", cmd->io_request->ChainOffset); + /* + * if it is raid 1/10 fp write capable. + * try to get second command from pool and construct it. + * From FW, it has confirmed that lba values of two PDs + * corresponds to single R1/10 LD are always same + * + */ + /* driver side count always should be less than max_fw_cmds + * to get new command + */ + if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { + r1_cmd = megasas_get_cmd_fusion(instance, + (scmd->request->tag + instance->max_fw_cmds)); + megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); + } + /* * Issue the command to the FW */ - atomic_inc(&instance->fw_outstanding); megasas_fire_cmd_fusion(instance, req_desc); + if (r1_cmd) + megasas_fire_cmd_fusion(instance, r1_cmd->request_desc); + + return 0; } /** + * megasas_complete_r1_command - + * completes R1 FP write commands which has valid peer smid + * @instance: Adapter soft state + * @cmd_fusion: MPT command frame + * + */ +static inline void +megasas_complete_r1_command(struct megasas_instance *instance, + struct megasas_cmd_fusion *cmd) +{ + u8 *sense, status, ex_status; + u32 data_length; + u16 peer_smid; + struct fusion_context *fusion; + struct megasas_cmd_fusion *r1_cmd = NULL; + struct scsi_cmnd *scmd_local = NULL; + struct RAID_CONTEXT_G35 *rctx_g35; + + rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35; + fusion = instance->ctrl_context; + peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid); + + r1_cmd = fusion->cmd_list[peer_smid - 1]; + scmd_local = cmd->scmd; + status = rctx_g35->status; + ex_status = rctx_g35->ex_status; + data_length = cmd->io_request->DataLength; + sense = cmd->sense; + + cmd->cmd_completed = true; + + /* Check if peer command is completed or not*/ + if (r1_cmd->cmd_completed) { + rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35; + if (rctx_g35->status != MFI_STAT_OK) { + status = rctx_g35->status; + ex_status = rctx_g35->ex_status; + data_length = r1_cmd->io_request->DataLength; + sense = r1_cmd->sense; + } + + megasas_return_cmd_fusion(instance, r1_cmd); + map_cmd_status(fusion, scmd_local, status, ex_status, + le32_to_cpu(data_length), sense); + if (instance->ldio_threshold && + megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + atomic_dec(&instance->ldio_outstanding); + scmd_local->SCp.ptr = NULL; + megasas_return_cmd_fusion(instance, cmd); + scsi_dma_unmap(scmd_local); + scmd_local->scsi_done(scmd_local); + } +} + +/** * complete_cmd_fusion - Completes command * @instance: Adapter soft state * Completes all commands that is in reply descriptor queue @@ -2244,8 +2980,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) struct megasas_cmd *cmd_mfi; struct megasas_cmd_fusion *cmd_fusion; u16 smid, num_completed; - u8 
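Completing such a pair is a two-phase handshake (megasas_complete_r1_command() below): each leg marks itself cmd_completed, and only the leg that finds its peer already completed finishes the SCSI command, adopting the peer's status and sense when the peer failed. A sketch of that "second completer wins" merge, assuming MFI_STAT_OK is 0 as in megaraid_sas.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MFI_STAT_OK 0

struct toy_r1_cmd {
        bool cmd_completed;
        uint8_t status;
};

/* Returns true when the caller is the second completer and must finish
 * the host command; *final_status then holds the merged result.
 */
static bool r1_complete(struct toy_r1_cmd *me, struct toy_r1_cmd *peer,
                        uint8_t *final_status)
{
        me->cmd_completed = true;
        if (!peer->cmd_completed)
                return false;           /* first leg: record and wait */
        *final_status = (peer->status != MFI_STAT_OK) ?
                        peer->status : me->status;
        return true;
}

int main(void)
{
        struct toy_r1_cmd a = { false, MFI_STAT_OK };
        struct toy_r1_cmd b = { false, 0x2D };  /* arbitrary failure code */
        uint8_t status = MFI_STAT_OK;

        if (!r1_complete(&a, &b, &status))
                printf("first leg done, waiting for peer\n");
        if (r1_complete(&b, &a, &status))
                printf("pair done, merged status 0x%02x\n", status);
        return 0;
}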
reply_descript_type; - u32 status, extStatus, device_id; + u8 reply_descript_type, *sense, status, extStatus; + u32 device_id, data_length; union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; int threshold_reply_count = 0; @@ -2275,20 +3011,17 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) while (d_val.u.low != cpu_to_le32(UINT_MAX) && d_val.u.high != cpu_to_le32(UINT_MAX)) { - smid = le16_to_cpu(reply_desc->SMID); + smid = le16_to_cpu(reply_desc->SMID); cmd_fusion = fusion->cmd_list[smid - 1]; - - scsi_io_req = - (struct MPI2_RAID_SCSI_IO_REQUEST *) - cmd_fusion->io_request; - - if (cmd_fusion->scmd) - cmd_fusion->scmd->SCp.ptr = NULL; + scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) + cmd_fusion->io_request; scmd_local = cmd_fusion->scmd; - status = scsi_io_req->RaidContext.status; - extStatus = scsi_io_req->RaidContext.exStatus; + status = scsi_io_req->RaidContext.raid_context.status; + extStatus = scsi_io_req->RaidContext.raid_context.ex_status; + sense = cmd_fusion->sense; + data_length = scsi_io_req->DataLength; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_TASK_MGMT: @@ -2303,37 +3036,33 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) break; case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ /* Update load balancing info */ - device_id = MEGASAS_DEV_INDEX(scmd_local); - lbinfo = &fusion->load_balance_info[device_id]; - if (cmd_fusion->scmd->SCp.Status & - MEGASAS_LOAD_BALANCE_FLAG) { + if (fusion->load_balance_info && + (cmd_fusion->scmd->SCp.Status & + MEGASAS_LOAD_BALANCE_FLAG)) { + device_id = MEGASAS_DEV_INDEX(scmd_local); + lbinfo = &fusion->load_balance_info[device_id]; atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]); - cmd_fusion->scmd->SCp.Status &= - ~MEGASAS_LOAD_BALANCE_FLAG; + cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; } - if (reply_descript_type == - MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { - if (megasas_dbg_lvl == 5) - dev_err(&instance->pdev->dev, "\nFAST Path " - "IO Success\n"); - } - /* Fall thru and complete IO */ + /* Fall through and complete IO */ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ - /* Map the FW Cmd Status */ - map_cmd_status(cmd_fusion, status, extStatus); - scsi_io_req->RaidContext.status = 0; - scsi_io_req->RaidContext.exStatus = 0; - if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) - atomic_dec(&instance->ldio_outstanding); - megasas_return_cmd_fusion(instance, cmd_fusion); - scsi_dma_unmap(scmd_local); - scmd_local->scsi_done(scmd_local); atomic_dec(&instance->fw_outstanding); - + if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) { + map_cmd_status(fusion, scmd_local, status, + extStatus, le32_to_cpu(data_length), + sense); + if (instance->ldio_threshold && + (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)) + atomic_dec(&instance->ldio_outstanding); + scmd_local->SCp.ptr = NULL; + megasas_return_cmd_fusion(instance, cmd_fusion); + scsi_dma_unmap(scmd_local); + scmd_local->scsi_done(scmd_local); + } else /* Optimal VD - R1 FP command completion. */ + megasas_complete_r1_command(instance, cmd_fusion); break; case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; - /* Poll mode. Dummy free. * In case of Interrupt mode, caller has reverse check.
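complete_cmd_fusion() discovers finished commands by polling, not by a produced-entry count: free reply descriptors keep an all-ones bit pattern, so the loop consumes entries until it reads one whose two 32-bit halves are still 0xFFFFFFFF (the halves are tested separately, as in the driver's d_val.u.low/high check). A self-contained model of that ring convention:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define QDEPTH 8

union toy_reply_desc {
        uint64_t words;
        struct { uint32_t low, high; } u;
};

int main(void)
{
        union toy_reply_desc ring[QDEPTH];
        memset(ring, 0xFF, sizeof(ring));       /* all slots "empty" */

        ring[0].words = 0x0001000000000012ULL;  /* two posted replies */
        ring[1].words = 0x0002000000000034ULL;

        for (int i = 0; i < QDEPTH; i++) {
                if (ring[i].u.low == UINT32_MAX &&
                    ring[i].u.high == UINT32_MAX)
                        break;                  /* nothing more posted */
                printf("consume reply %d: %#llx\n", i,
                       (unsigned long long)ring[i].words);
                ring[i].words = ~0ULL;          /* hand slot back as empty */
        }
        return 0;
}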
*/ @@ -2376,7 +3105,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) * pending to be completed */ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { - if (fusion->adapter_type == INVADER_SERIES) + if (instance->msix_combined) writel(((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[MSIxIndex/8]); @@ -2392,7 +3121,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) return IRQ_NONE; wmb(); - if (fusion->adapter_type == INVADER_SERIES) + if (instance->msix_combined) writel(((MSIxIndex & 0x7) << 24) | fusion->last_reply_idx[MSIxIndex], instance->reply_post_host_index_addr[MSIxIndex/8]); @@ -2405,6 +3134,22 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) } /** + * megasas_sync_irqs - Synchronizes all IRQs owned by adapter + * @instance: Adapter soft state + */ +void megasas_sync_irqs(unsigned long instance_addr) +{ + u32 count, i; + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + + for (i = 0; i < count; i++) + synchronize_irq(pci_irq_vector(instance->pdev, i)); +} + +/** * megasas_complete_cmd_dpc_fusion - Completes command * @instance: Adapter soft state * @@ -2489,7 +3234,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) * mfi_cmd: megasas_cmd pointer * */ -u8 +void build_mpt_mfi_pass_thru(struct megasas_instance *instance, struct megasas_cmd *mfi_cmd) { @@ -2518,7 +3263,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, io_req = cmd->io_request; - if (fusion->adapter_type == INVADER_SERIES) { + if (fusion->adapter_type >= INVADER_SERIES) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; @@ -2539,8 +3284,6 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz); - - return 0; } /** @@ -2552,21 +3295,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, union MEGASAS_REQUEST_DESCRIPTOR_UNION * build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; u16 index; - if (build_mpt_mfi_pass_thru(instance, cmd)) { - dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n"); - return NULL; - } - + build_mpt_mfi_pass_thru(instance, cmd); index = cmd->context.smid; req_desc = megasas_get_request_descriptor(instance, index - 1); - if (!req_desc) - return NULL; - req_desc->Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -2582,21 +3318,16 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) * @cmd: mfi cmd pointer * */ -int +void megasas_issue_dcmd_fusion(struct megasas_instance *instance, struct megasas_cmd *cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = build_mpt_cmd(instance, cmd); - if (!req_desc) { - dev_info(&instance->pdev->dev, "Failed from %s %d\n", - __func__, __LINE__); - return DCMD_NOT_FIRED; - } megasas_fire_cmd_fusion(instance, req_desc); - return DCMD_SUCCESS; + return; } /** @@ -2771,6 +3502,14 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, " will reset adapter scsi%d.\n", instance->host->host_no); megasas_complete_cmd_dpc_fusion((unsigned 
long)instance); + if (instance->requestorId && reason) { + dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT" + " state while polling during" + " I/O timeout handling for %d\n", + instance->host->host_no); + *convert = 1; + } + retval = 1; goto out; } @@ -2790,7 +3529,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, } /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ - if (instance->requestorId && reason) { + if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) { if (instance->hb_host_mem->HB.fwCounter != instance->hb_host_mem->HB.driverCounter) { instance->hb_host_mem->HB.driverCounter = @@ -3030,12 +3769,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, req_desc = megasas_get_request_descriptor(instance, (cmd_fusion->index - 1)); - if (!req_desc) { - dev_err(&instance->pdev->dev, "Failed from %s %d\n", - __func__, __LINE__); - megasas_return_cmd(instance, cmd_mfi); - return -ENOMEM; - } cmd_fusion->request_desc = req_desc; req_desc->Words = 0; @@ -3092,7 +3825,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, break; else { instance->instancet->disable_intr(instance); - msleep(1000); + megasas_sync_irqs((unsigned long)instance); megasas_complete_cmd_dpc_fusion ((unsigned long)instance); instance->instancet->enable_intr(instance); @@ -3173,13 +3906,13 @@ static u16 megasas_get_tm_devhandle(struct scsi_device *sdev) instance = (struct megasas_instance *)sdev->host->hostdata; fusion = instance->ctrl_context; - if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { + if (!MEGASAS_IS_LOGICAL(sdev)) { if (instance->use_seqnum_jbod_fp) { - pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + - sdev->id; - pd_sync = (void *)fusion->pd_seq_sync - [(instance->pd_seq_map_id - 1) & 1]; - devhandle = pd_sync->seq[pd_index].devHandle; + pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + + sdev->id; + pd_sync = (void *)fusion->pd_seq_sync + [(instance->pd_seq_map_id - 1) & 1]; + devhandle = pd_sync->seq[pd_index].devHandle; } else sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable" " without JBOD MAP support from %s %d\n", __func__, __LINE__); @@ -3212,6 +3945,9 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; fusion = instance->ctrl_context; + scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd); + scsi_print_command(scmd); + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," "SCSI host:%d\n", instance->host->host_no); @@ -3292,6 +4028,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) instance = (struct megasas_instance *)scmd->device->host->hostdata; fusion = instance->ctrl_context; + sdev_printk(KERN_INFO, scmd->device, + "target reset called for scmd(%p)\n", scmd); + if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL," "SCSI host:%d\n", instance->host->host_no); @@ -3362,7 +4101,7 @@ int megasas_check_mpio_paths(struct megasas_instance *instance, struct scsi_cmnd *scmd) { struct megasas_instance *peer_instance = NULL; - int retval = (DID_RESET << 16); + int retval = (DID_REQUEUE << 16); if (instance->peerIsPresent) { peer_instance = megasas_get_peer_instance(instance); @@ -3377,9 +4116,9 @@ int megasas_check_mpio_paths(struct megasas_instance *instance, /* Core fusion reset function */ int megasas_reset_fusion(struct 
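On an SR-IOV VF an I/O timeout by itself does not prove the adapter is dead, so before escalating to a reset the driver samples the shared heartbeat block and only treats a firmware counter that has stopped advancing as a fault. The check reduces to comparing a producer counter against the last value the consumer saw:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_heartbeat {
        uint32_t fw_counter;            /* incremented by firmware */
        uint32_t driver_counter;        /* last value the driver saw */
};

/* Returns true when firmware made progress since the previous poll. */
static bool hb_alive(struct toy_heartbeat *hb)
{
        if (hb->fw_counter != hb->driver_counter) {
                hb->driver_counter = hb->fw_counter;    /* catch up */
                return true;
        }
        return false;
}

int main(void)
{
        struct toy_heartbeat hb = { .fw_counter = 41, .driver_counter = 40 };

        printf("%s\n", hb_alive(&hb) ? "fw alive" : "fw stalled");
        printf("%s\n", hb_alive(&hb) ? "fw alive" : "fw stalled");
        return 0;
}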
Scsi_Host *shost, int reason) { - int retval = SUCCESS, i, convert = 0; + int retval = SUCCESS, i, j, convert = 0; struct megasas_instance *instance; - struct megasas_cmd_fusion *cmd_fusion; + struct megasas_cmd_fusion *cmd_fusion, *r1_cmd; struct fusion_context *fusion; u32 abs_state, status_reg, reset_adapter; u32 io_timeout_in_crash_mode = 0; @@ -3440,7 +4179,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); - msleep(1000); + megasas_sync_irqs((unsigned long)instance); /* First try waiting for commands to complete */ if (megasas_wait_for_outstanding_fusion(instance, reason, @@ -3451,23 +4190,40 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) if (convert) reason = 0; + if (megasas_dbg_lvl & OCR_LOGS) + dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n"); + /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; + /*check for extra commands issued by driver*/ + if (instance->is_ventura) { + r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds]; + megasas_return_cmd_fusion(instance, r1_cmd); + } scmd_local = cmd_fusion->scmd; if (cmd_fusion->scmd) { + if (megasas_dbg_lvl & OCR_LOGS) { + sdev_printk(KERN_INFO, + cmd_fusion->scmd->device, "SMID: 0x%x\n", + cmd_fusion->index); + scsi_print_command(cmd_fusion->scmd); + } + scmd_local->result = megasas_check_mpio_paths(instance, scmd_local); - if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) + if (instance->ldio_threshold && + megasas_cmd_type(scmd_local) == READ_WRITE_LDIO) atomic_dec(&instance->ldio_outstanding); megasas_return_cmd_fusion(instance, cmd_fusion); scsi_dma_unmap(scmd_local); scmd_local->scsi_done(scmd_local); - atomic_dec(&instance->fw_outstanding); } } + atomic_set(&instance->fw_outstanding, 0); + status_reg = instance->instancet->read_fw_status_reg( instance->reg_set); abs_state = status_reg & MFI_STATE_MASK; @@ -3528,11 +4284,13 @@ transition_to_ready: __func__, __LINE__); megaraid_sas_kill_hba(instance); retval = FAILED; + goto out; } /* Reset load balance info */ - memset(fusion->load_balance_info, 0, - sizeof(struct LD_LOAD_BALANCE_INFO) - *MAX_LOGICAL_DRIVES_EXT); + if (fusion->load_balance_info) + memset(fusion->load_balance_info, 0, + (sizeof(struct LD_LOAD_BALANCE_INFO) * + MAX_LOGICAL_DRIVES_EXT)); if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); @@ -3540,7 +4298,17 @@ transition_to_ready: megasas_setup_jbod_map(instance); shost_for_each_device(sdev, shost) - megasas_update_sdev_properties(sdev); + megasas_set_dynamic_target_properties(sdev); + + /* reset stream detection array */ + if (instance->is_ventura) { + for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { + memset(fusion->stream_detect_by_ld[j], + 0, sizeof(struct LD_STREAM_DETECT)); + fusion->stream_detect_by_ld[j]->mru_bit_map + = MR_STREAM_BITMAP; + } + } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); @@ -3676,6 +4444,64 @@ void megasas_fusion_ocr_wq(struct work_struct *work) megasas_reset_fusion(instance->host, 0); } +/* Allocate fusion context */ +int +megasas_alloc_fusion_context(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + + instance->ctrl_context_pages = get_order(sizeof(struct fusion_context)); + instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + instance->ctrl_context_pages); + if 
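The reset path reseeds every LD's stream tracker with mru_bit_map = MR_STREAM_BITMAP (0x76543210): the map stores the eight stream indices as packed 4-bit entries ordered from most recently used (low nibble) to least recently used (high nibble), so the reset value is simply the identity order 0..7, and ZERO_LAST_STREAM (0x0fffffff) drops the LRU victim when a stream is promoted to the front. Decoding the packing with the constants defined in the header below:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_INDEX_STREAM 4
#define MAX_STREAMS_TRACKED   8
#define MR_STREAM_BITMAP      0x76543210
#define STREAM_MASK           ((1 << BITS_PER_INDEX_STREAM) - 1)

int main(void)
{
        uint32_t map = MR_STREAM_BITMAP;

        for (int i = 0; i < MAX_STREAMS_TRACKED; i++)   /* slot 0 = MRU */
                printf("slot %d -> stream %u\n", i,
                       (map >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK);
        return 0;
}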
(!instance->ctrl_context) { + /* fall back to using vmalloc for fusion_context */ + instance->ctrl_context = vzalloc(sizeof(struct fusion_context)); + if (!instance->ctrl_context) { + dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); + return -ENOMEM; + } + } + + fusion = instance->ctrl_context; + + fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT * + sizeof(struct LD_LOAD_BALANCE_INFO)); + fusion->load_balance_info = + (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + fusion->load_balance_info_pages); + if (!fusion->load_balance_info) { + fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT * + sizeof(struct LD_LOAD_BALANCE_INFO)); + if (!fusion->load_balance_info) + dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, " + "continuing without Load Balance support\n"); + } + + return 0; +} + +void +megasas_free_fusion_context(struct megasas_instance *instance) +{ + struct fusion_context *fusion = instance->ctrl_context; + + if (fusion) { + if (fusion->load_balance_info) { + if (is_vmalloc_addr(fusion->load_balance_info)) + vfree(fusion->load_balance_info); + else + free_pages((ulong)fusion->load_balance_info, + fusion->load_balance_info_pages); + } + + if (is_vmalloc_addr(fusion)) + vfree(fusion); + else + free_pages((ulong)fusion, + instance->ctrl_context_pages); + } +} + struct megasas_instance_template megasas_instance_template_fusion = { .enable_intr = megasas_enable_intr_fusion, .disable_intr = megasas_disable_intr_fusion, diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index e3bee04c1eb1..d78d76112501 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -59,6 +59,8 @@ #define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 #define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 #define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 +#define MR_RL_WRITE_THROUGH_MODE 0x00 +#define MR_RL_WRITE_BACK_MODE 0x01 /* T10 PI defines */ #define MR_PROT_INFO_TYPE_CONTROLLER 0x8 @@ -81,6 +83,11 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3, + MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4, + MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6, + MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7 }; /* @@ -94,11 +101,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 #define THRESHOLD_REPLY_COUNT 50 +#define RAID_1_PEER_CMDS 2 #define JBOD_MAPS_COUNT 2 enum MR_FUSION_ADAPTER_TYPE { THUNDERBOLT_SERIES = 0, INVADER_SERIES = 1, + VENTURA_SERIES = 2, }; /* @@ -108,29 +117,133 @@ enum MR_FUSION_ADAPTER_TYPE { struct RAID_CONTEXT { #if defined(__BIG_ENDIAN_BITFIELD) - u8 nseg:4; - u8 Type:4; + u8 nseg:4; + u8 type:4; #else - u8 Type:4; - u8 nseg:4; + u8 type:4; + u8 nseg:4; #endif - u8 resvd0; - __le16 timeoutValue; - u8 regLockFlags; - u8 resvd1; - __le16 VirtualDiskTgtId; - __le64 regLockRowLBA; - __le32 regLockLength; - __le16 nextLMId; - u8 exStatus; - u8 status; - u8 RAIDFlags; - u8 numSGE; - __le16 configSeqNum; - u8 spanArm; - u8 priority; - u8 numSGEExt; - u8 resvd2; + u8 resvd0; + __le16 timeout_value; + u8 reg_lock_flags; + u8 resvd1; + __le16 virtual_disk_tgt_id; + __le64 reg_lock_row_lba; + __le32 reg_lock_length; + __le16 next_lmid; + u8 ex_status; + u8 status; + u8 raid_flags; + u8 num_sge; + __le16 config_seq_num; + u8 span_arm; + u8 priority; + u8 
num_sge_ext; + u8 resvd2; +}; + +/* + * Raid Context structure which describes Ventura MegaRAID specific + * IO parameters. This resides at offset 0x60 where the SGL normally + * starts in MPT IO Frames + */ +struct RAID_CONTEXT_G35 { + #define RAID_CONTEXT_NSEG_MASK 0x00F0 + #define RAID_CONTEXT_NSEG_SHIFT 4 + #define RAID_CONTEXT_TYPE_MASK 0x000F + #define RAID_CONTEXT_TYPE_SHIFT 0 + u16 nseg_type; + u16 timeout_value; /* 0x02 - 0x03 */ + u16 routing_flags; /* 0x04 - 0x05 routing flags */ + u16 virtual_disk_tgt_id; /* 0x06 - 0x07 */ + u64 reg_lock_row_lba; /* 0x08 - 0x0F */ + u32 reg_lock_length; /* 0x10 - 0x13 */ + union { + u16 next_lmid; /* 0x14 - 0x15 */ + u16 peer_smid; /* used for the raid 1/10 fp writes */ + } smid; + u8 ex_status; /* 0x16 : OUT */ + u8 status; /* 0x17 status */ + u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4], + * resvd[3:1], preferredCpu[0] + */ + u8 span_arm; /* 0x19 span[7:5], arm[4:0] */ + u16 config_seq_num; /* 0x1A - 0x1B */ + union { + /* + * Bit format: + * --------------------------------- + * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * --------------------------------- + * Byte0 | numSGE[7]- numSGE[0] | + * --------------------------------- + * Byte1 |SD | resvd | numSGE 8-11 | + * -------------------------------- + */ + #define NUM_SGE_MASK_LOWER 0xFF + #define NUM_SGE_MASK_UPPER 0x0F + #define NUM_SGE_SHIFT_UPPER 8 + #define STREAM_DETECT_SHIFT 7 + #define STREAM_DETECT_MASK 0x80 + struct { +#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */ + u16 stream_detected:1; + u16 reserved:3; + u16 num_sge:12; +#else + u16 num_sge:12; + u16 reserved:3; + u16 stream_detected:1; +#endif + } bits; + u8 bytes[2]; + } u; + u8 resvd2[2]; /* 0x1E-0x1F */ +}; + +#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1 +#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2 +#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3 +#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4 +#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5 +#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6 +#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7 +#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8 +#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00 +#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12 +#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000 + +static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35, + u16 sge_count) +{ + rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER); + rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER) + & NUM_SGE_MASK_UPPER); +} + +static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35) +{ + u16 sge_count; + + sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER) + << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0])); + return sge_count; +} + +#define SET_STREAM_DETECTED(rctx_g35) \ + (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK) + +#define CLEAR_STREAM_DETECTED(rctx_g35) \ + (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK)) + +static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35) +{ + return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK)); +} + +union RAID_CONTEXT_UNION { + struct RAID_CONTEXT raid_context; + struct RAID_CONTEXT_G35 raid_context_g35; }; #define RAID_CTX_SPANARM_ARM_SHIFT (0) @@ -139,6 +252,14 @@ struct RAID_CONTEXT { #define RAID_CTX_SPANARM_SPAN_SHIFT (5) #define RAID_CTX_SPANARM_SPAN_MASK (0xE0) +/* number of bits per index in U32 TrackStream */ +#define BITS_PER_INDEX_STREAM 4 +#define INVALID_STREAM_NUM 16 +#define MR_STREAM_BITMAP 0x76543210 +#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1) +#define ZERO_LAST_STREAM
0x0fffffff +#define MAX_STREAMS_TRACKED 8 + /* * define region lock types */ @@ -175,6 +296,8 @@ enum REGION_TYPE { #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +/* EEDP escape mode */ +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ #define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03) @@ -407,7 +530,7 @@ struct MPI2_RAID_SCSI_IO_REQUEST { u8 LUN[8]; /* 0x34 */ __le32 Control; /* 0x3C */ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ - struct RAID_CONTEXT RaidContext; /* 0x60 */ + union RAID_CONTEXT_UNION RaidContext; /* 0x60 */ union MPI2_SGE_IO_UNION SGL; /* 0x80 */ }; @@ -563,7 +686,7 @@ struct MPI2_IOC_INIT_REQUEST { __le16 HeaderVersion; /* 0x0E */ u32 Reserved5; /* 0x10 */ __le16 Reserved6; /* 0x14 */ - u8 Reserved7; /* 0x16 */ + u8 HostPageSize; /* 0x16 */ u8 HostMSIxVectors; /* 0x17 */ __le16 Reserved8; /* 0x18 */ __le16 SystemRequestFrameSize; /* 0x1A */ @@ -579,6 +702,7 @@ struct MPI2_IOC_INIT_REQUEST { /* mrpriv defines */ #define MR_PD_INVALID 0xFFFF +#define MR_DEVHANDLE_INVALID 0xFFFF #define MAX_SPAN_DEPTH 8 #define MAX_QUAD_DEPTH MAX_SPAN_DEPTH #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) @@ -586,16 +710,20 @@ struct MPI2_IOC_INIT_REQUEST { #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) #define MAX_LOGICAL_DRIVES 64 #define MAX_LOGICAL_DRIVES_EXT 256 +#define MAX_LOGICAL_DRIVES_DYN 512 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) #define MAX_ARRAYS 128 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) #define MAX_ARRAYS_EXT 256 #define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT) +#define MAX_API_ARRAYS_DYN 512 #define MAX_PHYSICAL_DEVICES 256 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) +#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512 #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102 +#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 @@ -603,7 +731,7 @@ struct MPI2_IOC_INIT_REQUEST { struct MR_DEV_HANDLE_INFO { __le16 curDevHdl; u8 validHandles; - u8 reserved; + u8 interfaceType; __le16 devHandle[2]; }; @@ -640,10 +768,56 @@ struct MR_SPAN_BLOCK_INFO { struct MR_SPAN_INFO block_span_info; }; +#define MR_RAID_CTX_CPUSEL_0 0 +#define MR_RAID_CTX_CPUSEL_1 1 +#define MR_RAID_CTX_CPUSEL_2 2 +#define MR_RAID_CTX_CPUSEL_3 3 +#define MR_RAID_CTX_CPUSEL_FCFS 0xF + +struct MR_CPU_AFFINITY_MASK { + union { + struct { +#ifndef MFI_BIG_ENDIAN + u8 hw_path:1; + u8 cpu0:1; + u8 cpu1:1; + u8 cpu2:1; + u8 cpu3:1; + u8 reserved:3; +#else + u8 reserved:3; + u8 cpu3:1; + u8 cpu2:1; + u8 cpu1:1; + u8 cpu0:1; + u8 hw_path:1; +#endif + }; + u8 core_mask; + }; +}; + +struct MR_IO_AFFINITY { + union { + struct { + struct MR_CPU_AFFINITY_MASK pdRead; + struct MR_CPU_AFFINITY_MASK pdWrite; + struct MR_CPU_AFFINITY_MASK ldRead; + struct MR_CPU_AFFINITY_MASK ldWrite; + }; + u32 word; + }; + u8 maxCores; /* Total cores + HW Path in ROC */ + u8 reserved[3]; +}; + struct MR_LD_RAID { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved4:5; + u32 reserved4:2; + u32 fp_cache_bypass_capable:1; + u32 fp_rmw_capable:1; + u32 disable_coalescing:1; u32 fpBypassRegionLock:1; u32 tmCapable:1; u32 fpNonRWCapable:1; @@ -654,11 
+828,13 @@ struct MR_LD_RAID { u32 encryptionType:8; u32 pdPiMode:4; u32 ldPiMode:4; - u32 reserved5:3; + u32 reserved5:2; + u32 ra_capable:1; u32 fpCapable:1; #else u32 fpCapable:1; - u32 reserved5:3; + u32 ra_capable:1; + u32 reserved5:2; u32 ldPiMode:4; u32 pdPiMode:4; u32 encryptionType:8; @@ -669,7 +845,10 @@ struct MR_LD_RAID { u32 fpNonRWCapable:1; u32 tmCapable:1; u32 fpBypassRegionLock:1; - u32 reserved4:5; + u32 disable_coalescing:1; + u32 fp_rmw_capable:1; + u32 fp_cache_bypass_capable:1; + u32 reserved4:2; #endif } capability; __le32 reserved6; @@ -696,7 +875,36 @@ struct MR_LD_RAID { u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ - u8 reserved3[0x80-0x2D]; /* 0x2D */ + /* 0x2D This LD accepts priority boost of this type */ + u8 ld_accept_priority_type; + u8 reserved2[2]; /* 0x2E - 0x2F */ + /* 0x30 - 0x33, Logical block size for the LD */ + u32 logical_block_length; + struct { +#ifndef MFI_BIG_ENDIAN + /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ + u32 ld_pi_exp:4; + /* 0x34, LOGICAL BLOCKS PER PHYSICAL + * BLOCK EXPONENT from READ CAPACITY 16 + */ + u32 ld_logical_block_exp:4; + u32 reserved1:24; /* 0x34 */ +#else + u32 reserved1:24; /* 0x34 */ + /* 0x34, LOGICAL BLOCKS PER PHYSICAL + * BLOCK EXPONENT from READ CAPACITY 16 + */ + u32 ld_logical_block_exp:4; + /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */ + u32 ld_pi_exp:4; +#endif + }; /* 0x34 - 0x37 */ + /* 0x38 - 0x3f, This will determine which + * core will process LD IO and PD IO. + */ + struct MR_IO_AFFINITY cpuAffinity; + /* Bit definitions are specified by MR_IO_AFFINITY */ + u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */ }; struct MR_LD_SPAN_MAP { @@ -735,6 +943,7 @@ struct IO_REQUEST_INFO { u16 ldTgtId; u8 isRead; __le16 devHandle; + u8 pd_interface; u64 pdBlock; u8 fpOkForIo; u8 IoforUnevenSpan; @@ -743,6 +952,8 @@ struct IO_REQUEST_INFO { u64 start_row; u8 span_arm; /* span[7:5], arm[4:0] */ u8 pd_after_lb; + u16 r1_alt_dev_handle; /* raid 1/10 only */ + bool ra_capable; }; struct MR_LD_TARGET_SYNC { @@ -751,6 +962,91 @@ struct MR_LD_TARGET_SYNC { __le16 seqNum; }; +/* + * RAID Map descriptor Types. + * Each element should uniquely identify one data structure in the RAID map + */ +enum MR_RAID_MAP_DESC_TYPE { + /* MR_DEV_HANDLE_INFO data */ + RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0, + /* target to Ld num Index map */ + RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1, + /* MR_ARRAY_INFO data */ + RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2, + /* MR_LD_SPAN_MAP data */ + RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3, + RAID_MAP_DESC_TYPE_COUNT, +}; + +/* + * This table defines the offset, size and num elements of each descriptor + * type in the RAID Map buffer + */ +struct MR_RAID_MAP_DESC_TABLE { + /* Raid map descriptor type */ + u32 raid_map_desc_type; + /* Offset into the RAID map buffer where + * descriptor data is saved + */ + u32 raid_map_desc_offset; + /* total size of the + * descriptor buffer + */ + u32 raid_map_desc_buffer_size; + /* Number of elements contained in the + * descriptor buffer + */ + u32 raid_map_desc_elements; +}; + +/* + * Dynamic Raid Map Structure.
+ */ +struct MR_FW_RAID_MAP_DYNAMIC { + u32 raid_map_size; /* total size of RAID Map structure */ + u32 desc_table_offset; /* Offset of desc table into RAID map */ + u32 desc_table_size; /* Total Size of desc table */ + /* Total Number of elements in the desc table */ + u32 desc_table_num_elements; + u64 reserved1; + u32 reserved2[3]; /* future use */ + /* timeout value used by driver in FP IOs */ + u8 fp_pd_io_timeout_sec; + u8 reserved3[3]; + /* when this seqNum increments, driver needs to + * release RMW buffers asap + */ + u32 rmw_fp_seq_num; + u16 ld_count; /* count of lds. */ + u16 ar_count; /* count of arrays */ + u16 span_count; /* count of spans */ + u16 reserved4[3]; +/* + * The below structure of pointers is only to be used by the driver. + * This is added in the API to reduce the amount of code changes + * needed in the driver to support dynamic RAID map. Firmware should + * not update these pointers while preparing the raid map + */ + union { + struct { + struct MR_DEV_HANDLE_INFO *dev_hndl_info; + u16 *ld_tgt_id_to_ld; + struct MR_ARRAY_INFO *ar_map_info; + struct MR_LD_SPAN_MAP *ld_span_map; + }; + u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT]; + }; +/* + * RAID Map descriptor table defines the layout of data in the RAID Map. + * The size of the descriptor table itself could change. + */ + /* Variable Size descriptor Table. */ + struct MR_RAID_MAP_DESC_TABLE + raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT]; + /* Variable Size buffer containing all data */ + u32 raid_map_desc_data[1]; +}; /* Dynamically sized RAID Map structure */ + #define IEEE_SGE_FLAGS_ADDR_MASK (0x03) #define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) #define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) @@ -759,6 +1055,16 @@ struct MR_LD_TARGET_SYNC { #define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) #define IEEE_SGE_FLAGS_END_OF_LIST (0x40) +#define MPI2_SGE_FLAGS_SHIFT (0x02) +#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0) +#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00) +#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02) + +#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C) +#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08) +#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10) + struct megasas_register_set; struct megasas_instance; @@ -795,6 +1101,10 @@ struct megasas_cmd_fusion { u32 index; u8 pd_r1_lb; struct completion done; + u8 pd_interface; + u16 r1_alt_dev_handle; /* raid 1/10 only */ + bool cmd_completed; /* raid 1/10 fp writes status holder */ + }; struct LD_LOAD_BALANCE_INFO { @@ -856,9 +1166,10 @@ struct MR_DRV_RAID_MAP { __le16 spanCount; __le16 reserve3; - struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; - u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; - struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT]; + struct MR_DEV_HANDLE_INFO + devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN]; + u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN]; + struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN]; struct MR_LD_SPAN_MAP ldSpanMap[1]; }; @@ -870,7 +1181,7 @@ struct MR_DRV_RAID_MAP { struct MR_DRV_RAID_MAP_ALL { struct MR_DRV_RAID_MAP raidMap; - struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1]; + struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1]; } __packed; @@ -919,7 +1230,8 @@ struct MR_PD_CFG_SEQ { u8 reserved:7; #endif } capability; - u8 reserved[3]; + u8 reserved; + u16 pd_target_id; } __packed; struct MR_PD_CFG_SEQ_NUM_SYNC { @@ -928,6 +1240,30 @@ struct MR_PD_CFG_SEQ_NUM_SYNC { struct MR_PD_CFG_SEQ seq[1]; } __packed; +/* stream detection */ +struct STREAM_DETECT { + u64
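With MR_FW_RAID_MAP_DYNAMIC the constituent structures no longer live at compile-time offsets; a consumer locates each one by scanning raid_map_desc_table[] for the wanted descriptor type and adding the stored offset to the base of the map buffer. A reduced sketch of that lookup (the toy_* names are illustrative, not the driver's helpers):

#include <stddef.h>
#include <stdint.h>

enum toy_desc_type {
        DESC_DEVHDL_INFO = 0,
        DESC_TGTID_INFO  = 1,
        DESC_ARRAY_INFO  = 2,
        DESC_SPAN_INFO   = 3,
};

struct toy_desc_entry {
        uint32_t type;          /* raid_map_desc_type */
        uint32_t offset;        /* offset into the raid map buffer */
        uint32_t buffer_size;   /* raid_map_desc_buffer_size */
        uint32_t elements;      /* raid_map_desc_elements */
};

/* Returns a pointer to the descriptor's payload, or NULL if absent. */
static void *find_desc(uint8_t *raid_map, const struct toy_desc_entry *table,
                       uint32_t num_entries, enum toy_desc_type type,
                       uint32_t *elements)
{
        for (uint32_t i = 0; i < num_entries; i++) {
                if (table[i].type == (uint32_t)type) {
                        *elements = table[i].elements;
                        return raid_map + table[i].offset;
                }
        }
        return NULL;
}

int main(void)
{
        uint8_t buf[256] = { 0 };
        struct toy_desc_entry table[] = {
                { DESC_SPAN_INFO, 128, 64, 1 },
        };
        uint32_t n = 0;
        void *span = find_desc(buf, table, 1, DESC_SPAN_INFO, &n);

        return (span == buf + 128 && n == 1) ? 0 : 1;
}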
next_seq_lba; /* next LBA to match sequential access */ + struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */ + struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */ + u32 count_cmds_in_stream; /* count of host commands in this stream */ + u16 num_sges_in_group; /* total number of SGEs in grouped IOs */ + u8 is_read; /* SCSI OpCode for this stream */ + u8 group_depth; /* total number of host commands in group */ + /* TRUE if cannot add any more commands to this group */ + bool group_flush; + u8 reserved[7]; /* pad to 64-bit alignment */ +}; + +struct LD_STREAM_DETECT { + bool write_back; /* TRUE if WB, FALSE if WT */ + bool fp_write_enabled; + bool members_ssds; + bool fp_cache_bypass_capable; + u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */ + /* this is the array of stream detect structures (one per stream) */ + struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED]; +}; + struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { u64 RDPQBaseAddress; u32 Reserved1; @@ -965,7 +1301,7 @@ struct fusion_context { u8 chain_offset_io_request; u8 chain_offset_mfi_pthru; - struct MR_FW_RAID_MAP_ALL *ld_map[2]; + struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2]; dma_addr_t ld_map_phys[2]; /*Non dma-able memory. Driver local copy.*/ @@ -973,14 +1309,18 @@ struct fusion_context { u32 max_map_sz; u32 current_map_sz; + u32 old_map_sz; + u32 new_map_sz; u32 drv_map_sz; u32 drv_map_pages; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT]; dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT]; u8 fast_path_io; - struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT]; + struct LD_LOAD_BALANCE_INFO *load_balance_info; + u32 load_balance_info_pages; LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT]; u8 adapter_type; + struct LD_STREAM_DETECT **stream_detect_by_ld; }; union desc_value {
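STREAM_DETECT and LD_STREAM_DETECT back the megasas_stream_detect() call on the Ventura I/O path: per LD, each I/O's start LBA is matched against next_seq_lba of up to eight tracked streams; a hit extends the stream (and is what lets a read-ahead-capable sequential read be demoted to the LD path), a miss recycles a slot. A deliberately reduced model of just that bookkeeping, without the MRU nibble map or command grouping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_STREAMS_TRACKED 8

struct toy_stream {
        uint64_t next_seq_lba;  /* LBA a sequential successor starts at */
        bool is_read;
        bool valid;
};

/* Returns true when the I/O continues an already-tracked stream. */
static bool stream_hit(struct toy_stream *s, uint64_t lba,
                       uint32_t num_blocks, bool is_read)
{
        int victim = 0;

        for (int i = 0; i < MAX_STREAMS_TRACKED; i++) {
                if (s[i].valid && s[i].is_read == is_read &&
                    s[i].next_seq_lba == lba) {
                        s[i].next_seq_lba = lba + num_blocks;
                        return true;            /* sequential: extend */
                }
                if (!s[i].valid)
                        victim = i;             /* remember a free slot */
        }
        s[victim] = (struct toy_stream){ lba + num_blocks, is_read, true };
        return false;                           /* start a new stream */
}

int main(void)
{
        struct toy_stream s[MAX_STREAMS_TRACKED] = { 0 };

        printf("%d\n", stream_hit(s, 0, 8, true));      /* 0: new stream */
        printf("%d\n", stream_hit(s, 8, 8, true));      /* 1: sequential */
        return 0;
}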