-rw-r--r--  sys/dev/smartpqi/smartpqi_cam.c         586
-rw-r--r--  sys/dev/smartpqi/smartpqi_cmd.c          11
-rw-r--r--  sys/dev/smartpqi/smartpqi_defines.h     459
-rw-r--r--  sys/dev/smartpqi/smartpqi_discovery.c   668
-rw-r--r--  sys/dev/smartpqi/smartpqi_event.c        94
-rw-r--r--  sys/dev/smartpqi/smartpqi_helper.c      289
-rw-r--r--  sys/dev/smartpqi/smartpqi_includes.h      6
-rw-r--r--  sys/dev/smartpqi/smartpqi_init.c        455
-rw-r--r--  sys/dev/smartpqi/smartpqi_intr.c        151
-rw-r--r--  sys/dev/smartpqi/smartpqi_ioctl.c       122
-rw-r--r--  sys/dev/smartpqi/smartpqi_ioctl.h        25
-rw-r--r--  sys/dev/smartpqi/smartpqi_main.c        247
-rw-r--r--  sys/dev/smartpqi/smartpqi_mem.c          74
-rw-r--r--  sys/dev/smartpqi/smartpqi_misc.c         77
-rw-r--r--  sys/dev/smartpqi/smartpqi_prototypes.h   65
-rw-r--r--  sys/dev/smartpqi/smartpqi_queue.c       236
-rw-r--r--  sys/dev/smartpqi/smartpqi_request.c     461
-rw-r--r--  sys/dev/smartpqi/smartpqi_response.c    171
-rw-r--r--  sys/dev/smartpqi/smartpqi_sis.c          93
-rw-r--r--  sys/dev/smartpqi/smartpqi_structures.h  597
-rw-r--r--  sys/dev/smartpqi/smartpqi_tag.c         103

21 files changed, 3392 insertions(+), 1598 deletions(-)
diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c
index c81b5a049da6..3d39e1ffbdd4 100644
--- a/sys/dev/smartpqi/smartpqi_cam.c
+++ b/sys/dev/smartpqi/smartpqi_cam.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,11 +33,15 @@
/*
* Set cam sim properties of the smartpqi adapter.
*/
-static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
+static void
+update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
{
pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
cam_sim_softc(sim);
+
+ device_t dev = softs->os_specific.pqi_dev;
+
DBG_FUNC("IN\n");
cpi->version_num = 1;
@@ -50,9 +53,9 @@ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
cpi->max_target = 1088;
cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE;
cpi->initiator_id = 255;
- strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
- strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
- strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN);
+ strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */
@@ -61,6 +64,11 @@ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->ccb_h.status = CAM_REQ_CMP;
+ cpi->hba_vendor = pci_get_vendor(dev);
+ cpi->hba_device = pci_get_device(dev);
+ cpi->hba_subvendor = pci_get_subvendor(dev);
+ cpi->hba_subdevice = pci_get_subdevice(dev);
+
DBG_FUNC("OUT\n");
}
@@ -68,7 +76,8 @@ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi)
/*
* Get transport settings of the smartpqi adapter
*/
-static void get_transport_settings(struct pqisrc_softstate *softs,
+static void
+get_transport_settings(struct pqisrc_softstate *softs,
struct ccb_trans_settings *cts)
{
struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
@@ -76,7 +85,7 @@ static void get_transport_settings(struct pqisrc_softstate *softs,
struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
DBG_FUNC("IN\n");
-
+
cts->protocol = PROTO_SCSI;
cts->protocol_version = SCSI_REV_SPC4;
cts->transport = XPORT_SPI;
@@ -94,8 +103,10 @@ static void get_transport_settings(struct pqisrc_softstate *softs,
/*
* Add the target to CAM layer and rescan, when a new device is found
*/
-void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
- union ccb *ccb;
+void
+os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ union ccb *ccb;
DBG_FUNC("IN\n");
@@ -121,12 +132,13 @@ void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) {
/*
* Remove the device from CAM layer when deleted or hot removed
*/
-void os_remove_device(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device) {
+void
+os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
struct cam_path *tmppath;
DBG_FUNC("IN\n");
-
+
if(softs->os_specific.sim_registered) {
if (xpt_create_path(&tmppath, NULL,
cam_sim_path(softs->os_specific.sim),
@@ -136,6 +148,7 @@ void os_remove_device(pqisrc_softstate_t *softs,
}
xpt_async(AC_LOST_DEVICE, tmppath, NULL);
xpt_free_path(tmppath);
+ softs->device_list[device->target][device->lun] = NULL;
pqisrc_free_device(softs, device);
}
@@ -146,7 +159,8 @@ void os_remove_device(pqisrc_softstate_t *softs,
/*
* Function to release the frozen simq
*/
-static void pqi_release_camq( rcb_t *rcb )
+static void
+pqi_release_camq(rcb_t *rcb)
{
pqisrc_softstate_t *softs;
struct ccb_scsiio *csio;
@@ -167,18 +181,12 @@ static void pqi_release_camq( rcb_t *rcb )
DBG_FUNC("OUT\n");
}
-/*
- * Function to dma-unmap the completed request
- */
-static void pqi_unmap_request(void *arg)
+static void
+pqi_synch_request(rcb_t *rcb)
{
- pqisrc_softstate_t *softs;
- rcb_t *rcb;
-
- DBG_IO("IN rcb = %p\n", arg);
+ pqisrc_softstate_t *softs = rcb->softs;
- rcb = (rcb_t *)arg;
- softs = rcb->softs;
+ DBG_IO("IN rcb = %p\n", rcb);
if (!(rcb->cm_flags & PQI_CMD_MAPPED))
return;
@@ -199,9 +207,21 @@ static void pqi_unmap_request(void *arg)
if(rcb->sgt && rcb->nseg)
os_mem_free(rcb->softs, (void*)rcb->sgt,
- rcb->nseg*sizeof(sgt_t));
+ rcb->nseg*sizeof(sgt_t));
- pqisrc_put_tag(&softs->taglist, rcb->tag);
+ DBG_IO("OUT\n");
+}
+
+/*
+ * Function to dma-unmap the completed request
+ */
+static inline void
+pqi_unmap_request(rcb_t *rcb)
+{
+ DBG_IO("IN rcb = %p\n", rcb);
+
+ pqi_synch_request(rcb);
+ pqisrc_put_tag(&rcb->softs->taglist, rcb->tag);
DBG_IO("OUT\n");
}
@@ -218,61 +238,103 @@ smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio)
DBG_FUNC("IN\n");
+ if (pqisrc_ctrl_offline(softs))
+ return;
+
cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ?
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes;
- if(cdb[0] == INQUIRY &&
+ if(cdb[0] == INQUIRY &&
(cdb[1] & SI_EVPD) == 0 &&
(csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN &&
csio->dxfer_len >= SHORT_INQUIRY_LENGTH) {
+
inq = (struct scsi_inquiry_data *)csio->data_ptr;
device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun];
/* Let the disks be probed and dealt with via CAM. Only for LD
let it fall through and inquiry be tweaked */
- if( !device || !pqisrc_is_logical_device(device) ||
- (device->devtype != DISK_DEVICE) ||
+ if (!device || !pqisrc_is_logical_device(device) ||
+ (device->devtype != DISK_DEVICE) ||
pqisrc_is_external_raid_device(device)) {
return;
}
- strncpy(inq->vendor, "MSCC",
- SID_VENDOR_SIZE);
- strncpy(inq->product,
- pqisrc_raidlevel_to_string(device->raid_level),
- SID_PRODUCT_SIZE);
+ strncpy(inq->vendor, device->vendor,
+ SID_VENDOR_SIZE);
+ strncpy(inq->product,
+ pqisrc_raidlevel_to_string(device->raid_level),
+ SID_PRODUCT_SIZE);
strncpy(inq->revision, device->volume_offline?"OFF":"OK",
- SID_REVISION_SIZE);
+ SID_REVISION_SIZE);
}
DBG_FUNC("OUT\n");
}
+static void
+pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb)
+{
+ uint32_t release_tag;
+ pqisrc_softstate_t *softs = rcb->softs;
+
+ DBG_IO("IN scsi io = %p\n", csio);
+
+ pqi_synch_request(rcb);
+ smartpqi_fix_ld_inquiry(rcb->softs, csio);
+ pqi_release_camq(rcb);
+ release_tag = rcb->tag;
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, release_tag);
+ xpt_done((union ccb *)csio);
+
+ DBG_FUNC("OUT\n");
+}
+
/*
* Handle completion of a command - pass results back through the CCB
*/
void
os_io_response_success(rcb_t *rcb)
{
- struct ccb_scsiio *csio;
+ struct ccb_scsiio *csio;
DBG_IO("IN rcb = %p\n", rcb);
- if (rcb == NULL)
+ if (rcb == NULL)
panic("rcb is null");
csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio;
- if (csio == NULL)
+ if (csio == NULL)
panic("csio is null");
rcb->status = REQUEST_SUCCESS;
csio->ccb_h.status = CAM_REQ_CMP;
- smartpqi_fix_ld_inquiry(rcb->softs, csio);
- pqi_release_camq(rcb);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)csio);
+ pqi_complete_scsi_io(csio, rcb);
+
+ DBG_IO("OUT\n");
+}
+
+static void
+copy_sense_data_to_csio(struct ccb_scsiio *csio,
+ uint8_t *sense_data, uint16_t sense_data_len)
+{
+ DBG_IO("IN csio = %p\n", csio);
+
+ memset(&csio->sense_data, 0, csio->sense_len);
+
+ sense_data_len = (sense_data_len > csio->sense_len) ?
+ csio->sense_len : sense_data_len;
+
+ if (sense_data)
+ memcpy(&csio->sense_data, sense_data, sense_data_len);
+
+ if (csio->sense_len > sense_data_len)
+ csio->sense_resid = csio->sense_len - sense_data_len;
+ else
+ csio->sense_resid = 0;
DBG_IO("OUT\n");
}
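/*
 * Worked example (illustrative, not part of this change): with
 * csio->sense_len == 32 and an 18-byte sense buffer from the firmware,
 * copy_sense_data_to_csio() clamps sense_data_len to 18, copies 18
 * bytes, and sets csio->sense_resid = 32 - 18 = 14 so CAM can tell how
 * much of the sense buffer went unused.
 */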
@@ -280,7 +342,8 @@ os_io_response_success(rcb_t *rcb)
/*
* Error response handling for raid IO
*/
-void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
+void
+os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
{
struct ccb_scsiio *csio;
pqisrc_softstate_t *softs;
@@ -294,10 +357,16 @@ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
softs = rcb->softs;
- ASSERT(err_info != NULL);
- csio->scsi_status = err_info->status;
csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ if (!err_info || !rcb->dvp) {
+ DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
+ err_info, rcb->dvp);
+ goto error_out;
+ }
+
+ csio->scsi_status = err_info->status;
+
if (csio->ccb_h.func_code == XPT_SCSI_IO) {
/*
* Handle specific SCSI status values.
@@ -305,59 +374,40 @@ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
switch(csio->scsi_status) {
case PQI_RAID_STATUS_QUEUE_FULL:
csio->ccb_h.status = CAM_REQ_CMP;
- DBG_ERR("Queue Full error");
+ DBG_ERR("Queue Full error\n");
break;
/* check condition, sense data included */
case PQI_RAID_STATUS_CHECK_CONDITION:
{
- uint16_t sense_data_len =
- LE_16(err_info->sense_data_len);
- uint8_t *sense_data = NULL;
- if (sense_data_len)
- sense_data = err_info->data;
- memset(&csio->sense_data, 0, csio->sense_len);
- sense_data_len = (sense_data_len >
- csio->sense_len) ?
- csio->sense_len :
- sense_data_len;
- if (sense_data)
- memcpy(&csio->sense_data, sense_data,
- sense_data_len);
- if (csio->sense_len > sense_data_len)
- csio->sense_resid = csio->sense_len
- - sense_data_len;
- else
- csio->sense_resid = 0;
- csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
+ uint16_t sense_data_len =
+ LE_16(err_info->sense_data_len);
+ uint8_t *sense_data = NULL;
+ if (sense_data_len)
+ sense_data = err_info->data;
+ copy_sense_data_to_csio(csio, sense_data, sense_data_len);
+ csio->ccb_h.status = CAM_SCSI_STATUS_ERROR
| CAM_AUTOSNS_VALID
| CAM_REQ_CMP_ERR;
+
}
break;
case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
{
- uint32_t resid = 0;
- resid = rcb->bcount-err_info->data_out_transferred;
- csio->resid = resid;
- csio->ccb_h.status = CAM_REQ_CMP;
- break;
+ uint32_t resid = 0;
+ resid = rcb->bcount-err_info->data_out_transferred;
+ csio->resid = resid;
+ csio->ccb_h.status = CAM_REQ_CMP;
}
+ break;
default:
csio->ccb_h.status = CAM_REQ_CMP;
break;
}
}
- if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) {
- softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY;
- if (csio->ccb_h.status & CAM_RELEASE_SIMQ)
- xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0);
- else
- csio->ccb_h.status |= CAM_RELEASE_SIMQ;
- }
-
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)csio);
+error_out:
+ pqi_complete_scsi_io(csio, rcb);
DBG_IO("OUT\n");
}
@@ -365,14 +415,15 @@ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info)
/*
* Error response handling for aio.
*/
-void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
+void
+os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
{
struct ccb_scsiio *csio;
pqisrc_softstate_t *softs;
DBG_IO("IN\n");
- if (rcb == NULL)
+ if (rcb == NULL)
panic("rcb is null");
rcb->status = REQUEST_SUCCESS;
@@ -382,6 +433,13 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
softs = rcb->softs;
+ if (!err_info || !rcb->dvp) {
+ csio->ccb_h.status = CAM_REQ_CMP_ERR;
+ DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n",
+ err_info, rcb->dvp);
+ goto error_out;
+ }
+
switch (err_info->service_resp) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
csio->ccb_h.status = err_info->status;
@@ -402,6 +460,14 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n");
+ /* Timed out TMF response comes here */
+ if (rcb->tm_req) {
+ rcb->req_pending = false;
+ rcb->status = REQUEST_SUCCESS;
+ DBG_ERR("AIO Disabled for TMF\n");
+ return;
+ }
+ rcb->dvp->aio_enabled = false;
rcb->dvp->offload_enabled = false;
csio->ccb_h.status |= CAM_REQUEUE_REQ;
break;
@@ -417,18 +483,24 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
break;
case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
- csio->ccb_h.status = CAM_REQ_CMP;
- break;
+ DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
+ (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED");
+ rcb->status = REQUEST_SUCCESS;
+ rcb->req_pending = false;
+ return;
case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
- DBG_WARN_BTL(rcb->dvp,"TMF rejected/Incorrect Lun\n");
- csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
- break;
+ DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n",
+ (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN");
+ rcb->status = REQUEST_FAILED;
+ rcb->req_pending = false;
+ return;
default:
DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n");
csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
break;
}
+
if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) {
csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION;
uint8_t *sense_data = NULL;
@@ -437,21 +509,12 @@ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info)
sense_data = err_info->data;
DBG_ERR_BTL(rcb->dvp, "SCSI_STATUS_CHECK_COND sense size %u\n",
sense_data_len);
- memset(&csio->sense_data, 0, csio->sense_len);
- if (sense_data)
- memcpy(&csio->sense_data, sense_data, ((sense_data_len >
- csio->sense_len) ? csio->sense_len : sense_data_len));
- if (csio->sense_len > sense_data_len)
- csio->sense_resid = csio->sense_len - sense_data_len;
- else
- csio->sense_resid = 0;
+ copy_sense_data_to_csio(csio, sense_data, sense_data_len);
csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
}
- smartpqi_fix_ld_inquiry(softs, csio);
- pqi_release_camq(rcb);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)csio);
+error_out:
+ pqi_complete_scsi_io(csio, rcb);
DBG_IO("OUT\n");
}
@@ -470,31 +533,21 @@ pqi_freeze_ccb(union ccb *ccb)
static void
pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
- pqisrc_softstate_t *softs;
- rcb_t *rcb;
-
- rcb = (rcb_t *)arg;
- softs = rcb->softs;
+ rcb_t *rcb = (rcb_t *)arg;
+ pqisrc_softstate_t *softs = rcb->softs;
+ union ccb *ccb;
- if( error || nseg > softs->pqi_cap.max_sg_elem )
- {
- rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
- pqi_freeze_ccb(rcb->cm_ccb);
- DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
+ if (error || nseg > softs->pqi_cap.max_sg_elem) {
+ DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n",
error, nseg, softs->pqi_cap.max_sg_elem);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)rcb->cm_ccb);
- return;
+ goto error_io;
}
- rcb->sgt = os_mem_alloc(softs, nseg * sizeof(rcb_t));
- if (rcb->sgt == NULL) {
- rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
- pqi_freeze_ccb(rcb->cm_ccb);
+ rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t));
+
+ if (!rcb->sgt) {
DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)rcb->cm_ccb);
- return;
+ goto error_io;
}
rcb->nseg = nseg;
@@ -518,38 +571,46 @@ pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
if (error) {
rcb->req_pending = false;
- rcb->cm_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
- pqi_freeze_ccb(rcb->cm_ccb);
DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error);
- pqi_unmap_request(rcb);
- xpt_done((union ccb *)rcb->cm_ccb);
+ } else {
+ /* IO was submitted to the device successfully. */
return;
}
+
+error_io:
+ ccb = rcb->cm_ccb;
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ pqi_freeze_ccb(ccb);
+ pqi_unmap_request(rcb);
+ xpt_done(ccb);
+ return;
}
/*
- * Function to dma-map the request buffer
+ * Function to dma-map the request buffer
*/
-static int pqi_map_request( rcb_t *rcb )
+static int
+pqi_map_request(rcb_t *rcb)
{
pqisrc_softstate_t *softs = rcb->softs;
- int error = PQI_STATUS_SUCCESS;
+ int bsd_status = BSD_SUCCESS;
union ccb *ccb = rcb->cm_ccb;
DBG_FUNC("IN\n");
/* check that mapping is necessary */
if (rcb->cm_flags & PQI_CMD_MAPPED)
- return(0);
+ return BSD_SUCCESS;
+
rcb->cm_flags |= PQI_CMD_MAPPED;
if (rcb->bcount) {
- error = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
+ bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat,
rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0);
- if (error != 0){
- DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed = %d count = %d\n",
- error, rcb->bcount);
- return error;
+ if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) {
+ DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n",
+ bsd_status, rcb->bcount);
+ return bsd_status;
}
} else {
/*
@@ -560,18 +621,21 @@ static int pqi_map_request( rcb_t *rcb )
/* Call IO functions depending on pd or ld */
rcb->status = REQUEST_PENDING;
- error = pqisrc_build_send_io(softs, rcb);
+ if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) {
+ bsd_status = EIO;
+ }
}
- DBG_FUNC("OUT error = %d\n", error);
+ DBG_FUNC("OUT error = %d\n", bsd_status);
- return error;
+ return bsd_status;
}
/*
* Function to clear the request control block
*/
-void os_reset_rcb( rcb_t *rcb )
+void
+os_reset_rcb(rcb_t *rcb)
{
rcb->error_info = NULL;
rcb->req = NULL;
@@ -582,7 +646,7 @@ void os_reset_rcb( rcb_t *rcb )
rcb->softs = NULL;
rcb->cm_flags = 0;
rcb->cm_data = NULL;
- rcb->bcount = 0;
+ rcb->bcount = 0;
rcb->nseg = 0;
rcb->sgt = NULL;
rcb->cm_ccb = NULL;
@@ -590,30 +654,39 @@ void os_reset_rcb( rcb_t *rcb )
rcb->ioaccel_handle = 0;
rcb->resp_qid = 0;
rcb->req_pending = false;
+ rcb->tm_req = false;
}
/*
* Callback function for the lun rescan
*/
-static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
+static void
+smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb)
{
xpt_free_path(ccb->ccb_h.path);
xpt_free_ccb(ccb);
}
+
/*
* Function to rescan the lun
*/
-static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
+static void
+smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
int lun)
{
- union ccb *ccb = NULL;
- cam_status status = 0;
- struct cam_path *path = NULL;
+ union ccb *ccb = NULL;
+ cam_status status = 0;
+ struct cam_path *path = NULL;
DBG_FUNC("IN\n");
ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ DBG_ERR("Unable to alloc ccb for lun rescan\n");
+ return;
+ }
+
status = xpt_create_path(&path, NULL,
cam_sim_path(softs->os_specific.sim), target, lun);
if (status != CAM_REQ_CMP) {
@@ -623,6 +696,7 @@ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
return;
}
+ bzero(ccb, sizeof(union ccb));
xpt_setup_ccb(&ccb->ccb_h, path, 5);
ccb->ccb_h.func_code = XPT_SCAN_LUN;
ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb;
@@ -636,7 +710,8 @@ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target,
/*
* Function to rescan the lun under each target
*/
-void smartpqi_target_rescan(struct pqisrc_softstate *softs)
+void
+smartpqi_target_rescan(struct pqisrc_softstate *softs)
{
int target = 0, lun = 0;
@@ -656,7 +731,8 @@ void smartpqi_target_rescan(struct pqisrc_softstate *softs)
/*
* Set the mode of tagged command queueing for the current task.
*/
-uint8_t os_get_task_attr(rcb_t *rcb)
+uint8_t
+os_get_task_attr(rcb_t *rcb)
{
union ccb *ccb = rcb->cm_ccb;
uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE;
@@ -679,19 +755,24 @@ uint8_t os_get_task_attr(rcb_t *rcb)
/*
* Complete all outstanding commands
*/
-void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
+void
+os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
{
int tag = 0;
+ pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN\n");
- for (tag = 1; tag < softs->max_outstanding_io; tag++) {
+ for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
rcb_t *prcb = &softs->rcb[tag];
+ dvp = prcb->dvp;
if(prcb->req_pending && prcb->cm_ccb ) {
prcb->req_pending = false;
prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP;
- xpt_done((union ccb *)prcb->cm_ccb);
- prcb->cm_ccb = NULL;
+ pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb);
+ if (dvp)
+ pqisrc_decrement_device_active_io(softs, dvp);
+
}
}
@@ -701,21 +782,22 @@ void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs)
/*
* IO handling functionality entry point
*/
-static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
+static int
+pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
{
rcb_t *rcb;
uint32_t tag, no_transfer = 0;
pqisrc_softstate_t *softs = (struct pqisrc_softstate *)
cam_sim_softc(sim);
- int32_t error = PQI_STATUS_FAILURE;
+ int32_t error;
pqi_scsi_dev_t *dvp;
DBG_FUNC("IN\n");
- if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) {
+ if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id);
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
@@ -724,40 +806,40 @@ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_SCSI_BUS_RESET
| CAM_BUSY | CAM_REQ_INPROG;
DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id);
- return error;
+ return ENXIO;
}
/* Check device state */
if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP;
DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id);
- return error;
+ return ENXIO;
}
/* Check device reset */
- if (dvp->reset_in_progress) {
+ if (DEVICE_RESET(dvp)) {
ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY;
DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id);
- return error;
+ return EBUSY;
}
if (dvp->expose_device == false) {
ccb->ccb_h.status = CAM_DEV_NOT_THERE;
DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id);
- return error;
+ return ENXIO;
}
tag = pqisrc_get_tag(&softs->taglist);
- if( tag == INVALID_ELEM ) {
+ if (tag == INVALID_ELEM) {
DBG_ERR("Get Tag failed\n");
xpt_freeze_simq(softs->os_specific.sim, 1);
softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ);
- return PQI_STATUS_FAILURE;
+ return EIO;
}
DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist);
rcb = &softs->rcb[tag];
- os_reset_rcb( rcb );
+ os_reset_rcb(rcb);
rcb->tag = tag;
rcb->softs = softs;
rcb->cmdlen = ccb->csio.cdb_len;
@@ -794,57 +876,72 @@ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb)
* if we ever learn a transport layer other than simple, may fail
* if the adapter rejects the command).
*/
- if ((error = pqi_map_request(rcb)) != 0) {
- rcb->req_pending = false;
+ if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) {
xpt_freeze_simq(softs->os_specific.sim, 1);
- ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
if (error == EINPROGRESS) {
- DBG_WARN("In Progress on %d\n", ccb->ccb_h.target_id);
- error = 0;
+ /* Release simq in the completion */
+ softs->os_specific.pqi_flags |= PQI_FLAG_BUSY;
+ error = BSD_SUCCESS;
} else {
- ccb->ccb_h.status |= CAM_REQUEUE_REQ;
+ rcb->req_pending = false;
+ ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
DBG_WARN("Requeue req error = %d target = %d\n", error,
ccb->ccb_h.target_id);
pqi_unmap_request(rcb);
+ error = EIO;
}
}
DBG_FUNC("OUT error = %d\n", error);
+
return error;
}
+static inline int
+pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb)
+{
+ if (PQI_STATUS_SUCCESS == pqi_status &&
+ REQUEST_SUCCESS == rcb->status)
+ return BSD_SUCCESS;
+ else
+ return EIO;
+}
+
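/*
 * Usage sketch (illustrative, not part of this change): a TMF is
 * reported successful only when both the transport-level PQI status
 * and the per-request completion status agree, e.g.:
 *
 *	rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
 *		SOP_TASK_MANAGEMENT_LUN_RESET);
 *	rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
 *	if (rval != BSD_SUCCESS)
 *		-- either the send or the completion failed; rval is EIO
 */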
/*
* Abort a task, task management functionality
*/
static int
pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
{
- rcb_t *rcb = ccb->ccb_h.sim_priv.entries[0].ptr;
- uint32_t abort_tag = rcb->tag;
- uint32_t tag = 0;
- int rval = PQI_STATUS_SUCCESS;
- uint16_t qid;
-
- DBG_FUNC("IN\n");
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
+ rcb_t *rcb = NULL;
+ rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr;
+ uint32_t tag;
+ int rval;
- qid = (uint16_t)rcb->resp_qid;
+ DBG_FUNC("IN\n");
tag = pqisrc_get_tag(&softs->taglist);
rcb = &softs->rcb[tag];
rcb->tag = tag;
- rcb->resp_qid = qid;
- rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, abort_tag,
+ if (!rcb->dvp) {
+ DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
+ rval = ENXIO;
+ goto error_tmf;
+ }
+
+ rcb->tm_req = true;
+
+ rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb,
SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK);
- if (PQI_STATUS_SUCCESS == rval) {
- rval = rcb->status;
- if (REQUEST_SUCCESS == rval) {
- ccb->ccb_h.status = CAM_REQ_ABORTED;
- }
- }
- pqisrc_put_tag(&softs->taglist, abort_tag);
- pqisrc_put_tag(&softs->taglist,rcb->tag);
+ if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS)
+ ccb->ccb_h.status = CAM_REQ_ABORTED;
+
+error_tmf:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
DBG_FUNC("OUT rval = %d\n", rval);
@@ -857,9 +954,10 @@ pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb)
static int
pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
{
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
rcb_t *rcb = NULL;
- uint32_t tag = 0;
- int rval = PQI_STATUS_SUCCESS;
+ uint32_t tag;
+ int rval;
DBG_FUNC("IN\n");
@@ -867,14 +965,22 @@ pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
rcb = &softs->rcb[tag];
rcb->tag = tag;
- rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, 0,
+ if (!rcb->dvp) {
+ DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code);
+ rval = ENXIO;
+ goto error_tmf;
+ }
+
+ rcb->tm_req = true;
+
+ rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL,
SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET);
- if (rval == PQI_STATUS_SUCCESS) {
- rval = rcb->status;
- }
+ rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
- pqisrc_put_tag(&softs->taglist,rcb->tag);
+error_tmf:
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
DBG_FUNC("OUT rval = %d\n", rval);
@@ -887,16 +993,17 @@ pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb)
static int
pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
{
+ struct ccb_hdr *ccb_h = &ccb->ccb_h;
pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun];
rcb_t *rcb = NULL;
- uint32_t tag = 0;
- int rval = PQI_STATUS_SUCCESS;
+ uint32_t tag;
+ int rval;
DBG_FUNC("IN\n");
if (devp == NULL) {
- DBG_ERR("bad target t%d\n", ccb->ccb_h.target_id);
- return (-1);
+ DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code);
+ return ENXIO;
}
tag = pqisrc_get_tag(&softs->taglist);
@@ -904,24 +1011,29 @@ pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb)
rcb->tag = tag;
devp->reset_in_progress = true;
- rval = pqisrc_send_tmf(softs, devp, rcb, 0,
+
+ rcb->tm_req = true;
+
+ rval = pqisrc_send_tmf(softs, devp, rcb, NULL,
SOP_TASK_MANAGEMENT_LUN_RESET);
- if (PQI_STATUS_SUCCESS == rval) {
- rval = rcb->status;
- }
+
+ rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb);
devp->reset_in_progress = false;
- pqisrc_put_tag(&softs->taglist,rcb->tag);
+
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
DBG_FUNC("OUT rval = %d\n", rval);
- return ((rval == REQUEST_SUCCESS) ?
- PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
+ return rval;
+
}
/*
* cam entry point of the smartpqi module.
*/
-static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
+static void
+smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct pqisrc_softstate *softs = cam_sim_softc(sim);
struct ccb_hdr *ccb_h = &ccb->ccb_h;
@@ -942,7 +1054,7 @@ static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
ccg = &ccb->ccg;
if (ccg->block_size == 0) {
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
- ccb->ccb_h.status = CAM_REQ_INVALID;
+ ccb->ccb_h.status |= CAM_REQ_INVALID;
break;
}
cam_calc_geometry(ccg, /* extended */ 1);
@@ -1008,7 +1120,8 @@ static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb)
* Function to poll the response, when interrupts are unavailable
* This also serves supporting crash dump.
*/
-static void smartpqi_poll(struct cam_sim *sim)
+static void
+smartpqi_poll(struct cam_sim *sim)
{
struct pqisrc_softstate *softs = cam_sim_softc(sim);
int i;
@@ -1020,7 +1133,8 @@ static void smartpqi_poll(struct cam_sim *sim)
/*
* Function to adjust the queue depth of a device
*/
-void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
+void
+smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth)
{
struct ccb_relsim crs;
@@ -1064,6 +1178,10 @@ smartpqi_async(void *callback_arg, u_int32_t code,
if (t_id <= (PQI_CTLR_INDEX - 1)) {
if (softs != NULL) {
pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun];
+ if (dvp == NULL) {
+ DBG_ERR("Target is null, target id=%d\n", t_id);
+ break;
+ }
smartpqi_adjust_queue_depth(path,
dvp->queue_depth);
}
@@ -1080,9 +1198,9 @@ smartpqi_async(void *callback_arg, u_int32_t code,
/*
* Function to register sim with CAM layer for smartpqi driver
*/
-int register_sim(struct pqisrc_softstate *softs, int card_index)
+int
+register_sim(struct pqisrc_softstate *softs, int card_index)
{
- int error = 0;
int max_transactions;
union ccb *ccb = NULL;
cam_status status = 0;
@@ -1096,7 +1214,7 @@ int register_sim(struct pqisrc_softstate *softs, int card_index)
if (softs->os_specific.devq == NULL) {
DBG_ERR("cam_simq_alloc failed txns = %d\n",
max_transactions);
- return PQI_STATUS_FAILURE;
+ return ENOMEM;
}
sim = cam_sim_alloc(smartpqi_cam_action, \
@@ -1107,7 +1225,7 @@ int register_sim(struct pqisrc_softstate *softs, int card_index)
DBG_ERR("cam_sim_alloc failed txns = %d\n",
max_transactions);
cam_simq_free(softs->os_specific.devq);
- return PQI_STATUS_FAILURE;
+ return ENOMEM;
}
softs->os_specific.sim = sim;
@@ -1118,14 +1236,14 @@ int register_sim(struct pqisrc_softstate *softs, int card_index)
cam_sim_free(softs->os_specific.sim, FALSE);
cam_simq_free(softs->os_specific.devq);
mtx_unlock(&softs->os_specific.cam_lock);
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
softs->os_specific.sim_registered = TRUE;
ccb = xpt_alloc_ccb_nowait();
if (ccb == NULL) {
DBG_ERR("xpt_create_path failed\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
if (xpt_create_path(&ccb->ccb_h.path, NULL,
@@ -1137,7 +1255,7 @@ int register_sim(struct pqisrc_softstate *softs, int card_index)
xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
cam_sim_free(softs->os_specific.sim, TRUE);
mtx_unlock(&softs->os_specific.cam_lock);
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
/*
* Callback to set the queue depth per target which is
@@ -1157,22 +1275,25 @@ int register_sim(struct pqisrc_softstate *softs, int card_index)
mtx_unlock(&softs->os_specific.cam_lock);
DBG_INFO("OUT\n");
- return error;
+
+ return BSD_SUCCESS;
}
/*
* Function to deregister smartpqi sim from cam layer
*/
-void deregister_sim(struct pqisrc_softstate *softs)
+void
+deregister_sim(struct pqisrc_softstate *softs)
{
struct ccb_setasync csa;
-
+
DBG_FUNC("IN\n");
if (softs->os_specific.mtx_init) {
mtx_lock(&softs->os_specific.cam_lock);
}
+
xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = 0;
@@ -1181,15 +1302,14 @@ void deregister_sim(struct pqisrc_softstate *softs)
xpt_action((union ccb *)&csa);
xpt_free_path(softs->os_specific.path);
- xpt_release_simq(softs->os_specific.sim, 0);
-
- xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
- softs->os_specific.sim_registered = FALSE;
-
if (softs->os_specific.sim) {
+ xpt_release_simq(softs->os_specific.sim, 0);
+ xpt_bus_deregister(cam_sim_path(softs->os_specific.sim));
+ softs->os_specific.sim_registered = FALSE;
cam_sim_free(softs->os_specific.sim, FALSE);
softs->os_specific.sim = NULL;
}
+
if (softs->os_specific.mtx_init) {
mtx_unlock(&softs->os_specific.cam_lock);
}
@@ -1205,3 +1325,27 @@ void deregister_sim(struct pqisrc_softstate *softs)
DBG_FUNC("OUT\n");
}
+
+void
+os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ struct cam_path *tmppath;
+
+ DBG_FUNC("IN\n");
+
+ if(softs->os_specific.sim_registered) {
+ if (xpt_create_path(&tmppath, NULL,
+ cam_sim_path(softs->os_specific.sim),
+ device->target, device->lun) != CAM_REQ_CMP) {
+ DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n",
+ device->bus, device->target, device->lun);
+ return;
+ }
+ xpt_async(AC_INQ_CHANGED, tmppath, NULL);
+ xpt_free_path(tmppath);
+ }
+
+ device->scsi_rescan = false;
+
+ DBG_FUNC("OUT\n");
+}
diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_cmd.c
index 2f1419822d4e..3c7c6311c6ab 100644
--- a/sys/dev/smartpqi/smartpqi_cmd.c
+++ b/sys/dev/smartpqi/smartpqi_cmd.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,8 +31,8 @@
* Function to submit the request to the adapter.
*/
-int pqisrc_submit_cmnd(pqisrc_softstate_t *softs,
- ib_queue_t *ib_q, void *req)
+int
+pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req)
{
char *slot = NULL;
uint32_t offset;
@@ -47,7 +46,7 @@ int pqisrc_submit_cmnd(pqisrc_softstate_t *softs,
/* Check queue full */
if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) {
DBG_WARN("OUT Q full\n");
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
return PQI_STATUS_QFULL;
}
@@ -70,7 +69,7 @@ int pqisrc_submit_cmnd(pqisrc_softstate_t *softs,
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
}
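/*
 * Illustrative note (not part of this change): the queue-full test in
 * pqisrc_submit_cmnd() deliberately leaves one element of the circular
 * queue unused, so that pi == ci unambiguously means "empty". For
 * example, with num_elem == 4 and *ci_virt_addr == 0, the queue is
 * declared full once pi_local reaches 3, because (3 + 1) % 4 == 0.
 */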
diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h
index b25eb087a851..94000a06a4c5 100644
--- a/sys/dev/smartpqi/smartpqi_defines.h
+++ b/sys/dev/smartpqi/smartpqi_defines.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,25 +28,34 @@
#ifndef _PQI_DEFINES_H
#define _PQI_DEFINES_H
-#define PQI_STATUS_FAILURE -1
-#define PQI_STATUS_TIMEOUT -2
-#define PQI_STATUS_QFULL -3
-#define PQI_STATUS_SUCCESS 0
+#define PQI_STATUS_FAILURE -1
+#define PQI_STATUS_TIMEOUT -2
+#define PQI_STATUS_QFULL -3
+#define PQI_STATUS_SUCCESS 0
-#define PQISRC_CMD_TIMEOUT_CNT 1200000 /* 500usec * 1200000 = 5 min */
-#define PQI_CMND_COMPLETE_TMO 1000 /* in millisecond */
+/* Maximum timeout for internal command completion */
+#define TIMEOUT_INFINITE ((uint32_t) (-1))
+#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE
+#define PQISRC_PASSTHROUGH_CMD_TIMEOUT PQISRC_CMD_TIMEOUT
+/* Delay in milliseconds */
+#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000)
+/* Delay in microseconds */
+#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */
-#define INVALID_ELEM 0xffff
+/* To disable atomic operations on device active io, set this to zero */
+#define PQISRC_DEVICE_IO_COUNTER 1
+
+#define INVALID_ELEM 0xffff
#ifndef MIN
-#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
-#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
-#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
+#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
+#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
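/*
 * Worked example (illustrative, not part of this change):
 *	PQISRC_ROUNDUP(10, 4)      == 12  -- next multiple of 4 at or above 10
 *	PQISRC_DIV_ROUND_UP(10, 4) == 3   -- ceiling of 10 / 4
 * Both macros evaluate their arguments more than once, so arguments
 * with side effects (e.g. x++) should be avoided.
 */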
#define ALIGN_BOUNDARY(a, n) { \
if (a % n) \
@@ -87,7 +95,7 @@
#define false 0
enum INTR_TYPE {
- LOCK_INTR,
+ LOCK_INTR,
LOCK_SLEEP
};
@@ -105,12 +113,12 @@ enum INTR_TYPE {
#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff)
#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32)
+
typedef enum REQUEST_STATUS {
REQUEST_SUCCESS = 0,
REQUEST_PENDING = -1,
REQUEST_FAILED = -2,
}REQUEST_STATUS_T;
-
typedef enum IO_PATH {
AIO_PATH,
RAID_PATH
@@ -134,6 +142,7 @@ typedef enum controller_state {
PQI_BUS_RESET,
}controller_state_t;
+
#define PQISRC_MAX_MSIX_SUPPORTED 64
/* SIS Specific */
@@ -172,10 +181,10 @@ typedef enum controller_state {
#define PQI_CTRL_KERNEL_PANIC 0x100
#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF
-#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
+#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000
#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */
#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */
-#define SIS_CMD_STATUS_SUCCESS 0x1
+#define SIS_CMD_STATUS_SUCCESS 0x1
/* PQI specific */
@@ -185,18 +194,20 @@ typedef enum controller_state {
#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16
#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16
-#define PQI_MIN_OP_IB_QUEUE_ID 1
-#define PQI_OP_EVENT_QUEUE_ID 1
-#define PQI_MIN_OP_OB_QUEUE_ID 2
+
+
+#define PQI_MIN_OP_IB_QUEUE_ID 1
+#define PQI_OP_EVENT_QUEUE_ID 1
+#define PQI_MIN_OP_OB_QUEUE_ID 2
#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128
#define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 2)
#define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q)
-#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
-#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
+#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q)
+#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ
#define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2
#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64
-#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
+#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */
#define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
#define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */
#define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */
@@ -205,7 +216,7 @@ typedef enum controller_state {
#define PQISRC_SGL_SUPPORTED_BIT_MASK 0
#define PQISRC_NUM_EVENT_Q_ELEM 32
-#define PQISRC_EVENT_Q_ELEM_SIZE 32
+#define PQISRC_EVENT_Q_ELEM_SIZE 32
/* PQI Registers state status */
@@ -224,19 +235,19 @@ enum pqisrc_ctrl_mode{
};
/* PQI device performing internal initialization (e.g., POST). */
-#define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0
+#define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0
/* Upon entry to this state PQI device initialization begins. */
-#define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1
+#define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1
/* PQI device Standard registers are available to the driver. */
-#define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2
+#define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2
/* PQI device is initialized and ready to process any PCI transactions. */
-#define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3
+#define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3
/* The PQI Device Error register indicates the error. */
-#define PQI_DEV_STATE_ERROR 0x4
+#define PQI_DEV_STATE_ERROR 0x4
#define PQI_DEV_STATE_AT_INIT ( PQI_DEV_STATE_PQI_STATUS_AVAILABLE | \
PQI_DEV_STATE_ALL_REGISTERS_READY | \
- PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
+ PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY )
#define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG"
#define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64
@@ -260,17 +271,38 @@ enum pqisrc_ctrl_mode{
#define PQI_CONF_TABLE_SIGNATURE "CFGTABLE"
/* PQI configuration table section IDs */
+#define PQI_CONF_TABLE_ALL_SECTIONS (-1)
#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0
#define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1
#define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2
#define PQI_CONF_TABLE_SECTION_DEBUG 3
#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4
-#define CTRLR_HEARTBEAT_CNT(softs) LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
-#define PQI_NEW_HEARTBEAT_MECHANISM(softs) 1
+
+#define PQI_FIRMWARE_FEATURE_OFA 0
+#define PQI_FIRMWARE_FEATURE_SMP 1
+#define PQI_FIRMWARE_FEATURE_MAX_KNOWN 2
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_0 3
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_1_10 4
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_5_50 5
+#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_6_60 6
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_0 7
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_1_10 8
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_5_50 9
+#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_6_60 10
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_SATA_WWN_FOR_DEV_UNIQUE_ID 12
+#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT 13
+#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT 14
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 14
+
+#define CTRLR_HEARTBEAT_CNT(softs) \
+ LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off))
+#define PQI_HEARTBEAT_TIMEOUT_SEC (10) /* 10 sec interval */
+#define PQI_HOST_WELLNESS_TIMEOUT_SEC (24*3600)
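/*
 * Illustrative sketch (assumed usage; the field and recovery routine
 * named here are hypothetical, not part of this change): a periodic
 * callout can compare successive heartbeat counter reads and treat a
 * stalled counter as a hung controller:
 *
 *	uint64_t cnt = CTRLR_HEARTBEAT_CNT(softs);
 *	if (cnt == softs->prev_heartbeat_count)
 *		handle_dead_controller(softs);	-- recovery path assumed
 *	else
 *		softs->prev_heartbeat_count = cnt;
 */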
/* pqi-2r00a table 36 */
-#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
+#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000)
#define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31)
#define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01
@@ -290,13 +322,15 @@ enum pqisrc_ctrl_mode{
#define PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE 0xe0
/* PQI / Vendor specific IU */
-#define PQI_FUNCTION_REPORT_DEV_CAP 0x00
-#define PQI_REQUEST_IU_TASK_MANAGEMENT 0x13
+#define PQI_FUNCTION_REPORT_DEV_CAP 0x00
+#define PQI_REQUEST_IU_RAID_TASK_MANAGEMENT 0x13
#define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14
#define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15
+#define PQI_REQUEST_IU_AIO_TASK_MANAGEMENT 0x16
#define PQI_REQUEST_IU_GENERAL_ADMIN 0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_VENDOR_GENERAL 0x75
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
#define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93
#define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0
@@ -307,6 +341,7 @@ enum pqisrc_ctrl_mode{
#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3
#define PQI_RESPONSE_IU_AIO_PATH_IS_OFF 0xf4
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+#define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7
#define PQI_REQUEST_HEADER_LENGTH 4
#define PQI_FUNCTION_CREATE_OPERATIONAL_IQ 0x10
#define PQI_FUNCTION_CREATE_OPERATIONAL_OQ 0x11
@@ -315,7 +350,14 @@ enum pqisrc_ctrl_mode{
#define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14
#define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1
-#define PQI_DEFAULT_IB_QUEUE 0
+#define PQI_DEFAULT_IB_QUEUE 0
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0
+
+#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0
+#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1
+#define PQI_VENDOR_RESPONSE_IU_INVALID_PARAM 2
+#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3
+
/* Interface macros */
#define GET_FW_STATUS(softs) \
@@ -333,19 +375,22 @@ enum pqisrc_ctrl_mode{
#define PQI_SAVE_CTRL_MODE(softs, mode) \
PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode)
-#define PQISRC_MAX_TARGETID 1024
-#define PQISRC_MAX_TARGETLUN 64
+#define PQISRC_MAX_TARGETID 1024
+#define PQISRC_MAX_TARGETLUN 64
/* Vendor specific IU Type for Event config Cmds */
-#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
-#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
-#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
-#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
-#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
-#define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000
+#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72
+#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6
+
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81
+#define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000
+#define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000
+
/* Supported Event types by controller */
-#define PQI_NUM_SUPPORTED_EVENTS 7
+
+#define PQI_NUM_SUPPORTED_EVENTS 6
#define PQI_EVENT_TYPE_HOTPLUG 0x1
#define PQI_EVENT_TYPE_HARDWARE 0x2
@@ -353,18 +398,16 @@ enum pqisrc_ctrl_mode{
#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
-#define PQI_EVENT_TYPE_HEARTBEAT 0xff
/* for indexing into the pending_events[] field of struct pqisrc_softstate */
-#define PQI_EVENT_HEARTBEAT 0
-#define PQI_EVENT_HOTPLUG 1
-#define PQI_EVENT_HARDWARE 2
-#define PQI_EVENT_PHYSICAL_DEVICE 3
-#define PQI_EVENT_LOGICAL_DEVICE 4
-#define PQI_EVENT_AIO_STATE_CHANGE 5
-#define PQI_EVENT_AIO_CONFIG_CHANGE 6
+#define PQI_EVENT_HOTPLUG 0
+#define PQI_EVENT_HARDWARE 1
+#define PQI_EVENT_PHYSICAL_DEVICE 2
+#define PQI_EVENT_LOGICAL_DEVICE 3
+#define PQI_EVENT_AIO_STATE_CHANGE 4
+#define PQI_EVENT_AIO_CONFIG_CHANGE 5
+
-#define PQI_MAX_HEARTBEAT_REQUESTS 5
/* Device flags */
#define PQISRC_DFLAG_VALID (1 << 0)
@@ -375,34 +418,37 @@ enum pqisrc_ctrl_mode{
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
-#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
-#define DEV_GONE(dev) (!dev || (dev->invalid == true))
-#define IS_AIO_PATH(dev) (dev->aio_enabled)
-#define IS_RAID_PATH(dev) (!dev->aio_enabled)
+#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET)
+#define DEV_GONE(dev) (!dev || (dev->invalid == true))
+#define IS_AIO_PATH(dev) (dev->aio_enabled)
+#define IS_RAID_PATH(dev) (!dev->aio_enabled)
+
+#define DEVICE_RESET(dvp) (dvp->reset_in_progress)
/* SOP data direction flags */
-#define SOP_DATA_DIR_NONE 0x00
-#define SOP_DATA_DIR_FROM_DEVICE 0x01
-#define SOP_DATA_DIR_TO_DEVICE 0x02
-#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
-#define SOP_PARTIAL_DATA_BUFFER 0x04
+#define SOP_DATA_DIR_NONE 0x00
+#define SOP_DATA_DIR_FROM_DEVICE 0x01
+#define SOP_DATA_DIR_TO_DEVICE 0x02
+#define SOP_DATA_DIR_BIDIRECTIONAL 0x03
+#define SOP_PARTIAL_DATA_BUFFER 0x04
-#define PQISRC_DMA_VALID (1 << 0)
-#define PQISRC_CMD_NO_INTR (1 << 1)
+#define PQISRC_DMA_VALID (1 << 0)
+#define PQISRC_CMD_NO_INTR (1 << 1)
#define SOP_TASK_ATTRIBUTE_SIMPLE 0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1
#define SOP_TASK_ATTRIBUTE_ORDERED 2
#define SOP_TASK_ATTRIBUTE_ACA 4
-#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
-#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
+#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0
+#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4
#define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5
-#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
+#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01
#define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02
#define SOP_TASK_MANAGEMENT_LUN_RESET 0x8
+
/* Additional CDB bytes */
#define PQI_ADDITIONAL_CDB_BYTES_0 0 /* 16 byte CDB */
#define PQI_ADDITIONAL_CDB_BYTES_4 1 /* 20 byte CDB */
@@ -439,45 +485,63 @@ enum pqisrc_ctrl_mode{
#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4
#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5
-#define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */
-
-#define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD
-#define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION
-#define PQI_RAID_STATUS_CONDITION_MET PQI_AIO_STATUS_CONDITION_MET
-#define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY
-#define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD
-#define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET
-#define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT
-#define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED
-#define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL
-#define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED
-#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
-#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
+#define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */
+
+#define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD
+#define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION
+#define PQI_RAID_STATUS_CONDITION_MET PQI_AIO_STATUS_CONDITION_MET
+#define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY
+#define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD
+#define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET
+#define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT
+#define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED
+#define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL
+#define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED
+#define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN
+#define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN
/* VPD inquiry pages */
-#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
+#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */
#define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */
#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */
-#define VPD_PAGE (1 << 8)
+#define VPD_PAGE (1 << 8)
+
/* logical volume states */
-#define SA_LV_OK 0x0
-#define SA_LV_NOT_AVAILABLE 0xb
-#define SA_LV_UNDERGOING_ERASE 0xf
-#define SA_LV_UNDERGOING_RPI 0x12
-#define SA_LV_PENDING_RPI 0x13
-#define SA_LV_ENCRYPTED_NO_KEY 0x14
-#define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
-#define SA_LV_UNDERGOING_ENCRYPTION 0x16
-#define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
-#define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
-#define SA_LV_PENDING_ENCRYPTION 0x19
-#define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a
-#define SA_LV_STATUS_VPD_UNSUPPORTED 0xff
+#define SA_LV_OK 0x0
+#define SA_LV_FAILED 0x1
+#define SA_LV_NOT_CONFIGURED 0x2
+#define SA_LV_DEGRADED 0x3
+#define SA_LV_READY_FOR_RECOVERY 0x4
+#define SA_LV_UNDERGOING_RECOVERY 0x5
+#define SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED 0x6
+#define SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 0x7
+#define SA_LV_HARDWARE_OVERHEATING 0x8
+#define SA_LV_HARDWARE_HAS_OVERHEATED 0x9
+#define SA_LV_UNDERGOING_EXPANSION 0xA
+#define SA_LV_NOT_AVAILABLE 0xb
+#define SA_LV_QUEUED_FOR_EXPANSION 0xc
+#define SA_LV_DISABLED_SCSI_ID_CONFLICT 0xd
+#define SA_LV_EJECTED 0xe
+#define SA_LV_UNDERGOING_ERASE 0xf
+#define SA_LV_UNDERGOING_RPI 0x12
+#define SA_LV_PENDING_RPI 0x13
+#define SA_LV_ENCRYPTED_NO_KEY 0x14
+#define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15
+#define SA_LV_UNDERGOING_ENCRYPTION 0x16
+#define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17
+#define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18
+#define SA_LV_PENDING_ENCRYPTION 0x19
+#define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a
+#define SA_LV_STATUS_VPD_UNSUPPORTED 0xff
+
+
+/* constants for flags field of ciss_vpd_logical_volume_status */
+#define SA_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for host I/O */
/*
* assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
@@ -486,44 +550,76 @@ enum pqisrc_ctrl_mode{
/* 0 = no limit */
#define PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 0
+#define PQI_LOG_EXT_QUEUE_DEPTH_ENABLED 0x20
+#define PQI_LOG_EXT_QUEUE_ENABLE 0x56
+#define MAX_RAW_M256_QDEPTH 32512
+#define MAX_RAW_M16_QDEPTH 2032
+#define PQI_PTRAID_UPDATE_ON_RESCAN_LUNS 0x80000000
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
#define SA_CACHE_FLUSH 0x1
+#define PQISRC_INQUIRY_TIMEOUT 30
#define SA_INQUIRY 0x12
#define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define SA_CISS_READ 0xc0
#define SA_GET_RAID_MAP 0xc8
-#define SA_REPORT_LOG_EXTENDED 0x1
-#define SA_REPORT_PHYS_EXTENDED 0x2
+#define SCSI_SENSE_RESPONSE_70 0x70
+#define SCSI_SENSE_RESPONSE_71 0x71
+#define SCSI_SENSE_RESPONSE_72 0x72
+#define SCSI_SENSE_RESPONSE_73 0x73
+
+#define SA_REPORT_LOG_EXTENDED 0x1
+#define SA_REPORT_PHYS_EXTENDED 0x2
-#define SA_CACHE_FLUSH_BUF_LEN 4
+#define SA_CACHE_FLUSH_BUF_LEN 4
-#define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
-#define RAID_MAP_MAX_ENTRIES 1024
-#define RAID_MAP_ENCRYPTION_ENABLED 0x1
+#define GET_SCSI_SNO(cmd) (cmd->cmdId.serialNumber)
+
+#define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_ENCRYPTION_ENABLED 0x1
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
#define ASC_LUN_NOT_READY 0x4
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x4
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x2
+
#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
+
#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x1
#define OFFLOAD_ENABLED_BIT 0x2
-#define PQI_RAID_DATA_IN_OUT_GOOD 0x0
-#define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1
-#define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
-#define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4
+#define PQI_RAID_DATA_IN_OUT_GOOD 0x0
+#define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1
+#define PQI_RAID_DATA_IN_OUT_BUFFER_ERROR 0x40
+#define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW 0x41
+#define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42
+#define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43
+#define PQI_RAID_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60
+#define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61
+#define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62
+#define PQI_RAID_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x63
+#define PQI_RAID_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64
+#define PQI_RAID_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65
+#define PQI_RAID_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66
+#define PQI_RAID_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67
+#define PQI_RAID_DATA_IN_OUT_ERROR 0xf0
+#define PQI_RAID_DATA_IN_OUT_PROTOCOL_ERROR 0xf1
+#define PQI_RAID_DATA_IN_OUT_HARDWARE_ERROR 0xf2
+#define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3
+#define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4
+#define PQI_RAID_DATA_IN_OUT_TIMEOUT 0xf5
+
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
@@ -534,6 +630,7 @@ enum pqisrc_ctrl_mode{
#define TEST_UNIT_READY 0x00
#define SCSI_VPD_HEADER_LENGTH 64
+
#define PQI_MAX_MULTILUN 256
#define PQI_MAX_LOGICALS 64
#define PQI_MAX_PHYSICALS 1024
@@ -564,6 +661,12 @@ typedef enum pqisrc_device_status {
#define SA_RAID_MAX SA_RAID_ADM
#define SA_RAID_UNKNOWN 0xff
+#define BIT0 (1 << 0)
+#define BIT1 (1 << 1)
+#define BIT2 (1 << 2)
+#define BIT3 (1 << 3)
+
+#define BITS_PER_BYTE 8
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
@@ -574,6 +677,9 @@ typedef enum pqisrc_device_status {
#define BMIC_CACHE_FLUSH 0xc2
#define BMIC_FLASH_FIRMWARE 0xf7
#define BMIC_WRITE_HOST_WELLNESS 0xa5
+#define BMIC_SET_DIAGS_OPTIONS 0xf4
+#define BMIC_SENSE_DIAGS_OPTIONS 0xf5
+
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0)
#define BMIC_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3F)
@@ -595,6 +701,10 @@ typedef enum pqisrc_device_status {
PQI_RESERVED_IO_SLOTS_TMF + \
PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS)
+/* Defines for print flags */
+#define PRINT_FLAG_HDR_COLUMN 0x0001
+
+
static inline uint16_t GET_LE16(const uint8_t *p)
{
return p[0] | p[1] << 8;
@@ -651,6 +761,7 @@ static inline void PUT_BE64(uint64_t val, uint8_t *p)
#define OS_ATTRIBUTE_PACKED __attribute__((__packed__))
#define OS_ATTRIBUTE_ALIGNED(n) __attribute__((aligned(n)))
+
/* Management Interface */
#define CCISS_IOC_MAGIC 'C'
#define SMARTPQI_IOCTL_BASE 'M'
@@ -673,7 +784,7 @@ typedef struct pqi_pci_info
typedef struct _driver_info
{
unsigned char major_version;
- unsigned char minor_version;
+ unsigned long minor_version;
unsigned char release_version;
unsigned long build_revision;
unsigned long max_targets;
@@ -683,30 +794,56 @@ typedef struct _driver_info
typedef uint8_t *passthru_buf_type_t;
-#define PQISRC_DRIVER_MAJOR 1
-#define PQISRC_DRIVER_MINOR 0
-#define PQISRC_DRIVER_RELEASE 3
-#define PQISRC_DRIVER_REVISION 239
-#define STR(s) # s
-#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d)
-#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_DRIVER_MAJOR, \
- PQISRC_DRIVER_MINOR, \
- PQISRC_DRIVER_RELEASE, \
- PQISRC_DRIVER_REVISION)
+#define PQISRC_OS_VERSION 1
+#define PQISRC_FEATURE_VERSION 4014
+#define PQISRC_PATCH_VERSION 0
+#define PQISRC_BUILD_VERSION 105
+#define STR(s) # s
+#define PQISRC_VERSION(a, b, c, d) STR(a.b.c.d)
+#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_OS_VERSION, \
+ PQISRC_FEATURE_VERSION, \
+ PQISRC_PATCH_VERSION, \
+ PQISRC_BUILD_VERSION)
+
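Since the four components are macro-expanded before STR() stringifies them, PQISRC_DRIVER_VERSION collapses to the literal "1.4014.0.105". A hypothetical compile-time illustration, not part of the change:

/* Both strings are 12 characters plus the terminating NUL. */
_Static_assert(sizeof(PQISRC_DRIVER_VERSION) == sizeof("1.4014.0.105"),
    "driver version string has the expected shape");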
/* End Management interface */
#ifdef ASSERT
#undef ASSERT
#endif
+/*
+ * os_atomic64_cas --
+ *
+ * Atomically read, compare, and conditionally write,
+ * i.e. compare-and-swap.
+ *
+ * retval true  on success
+ * retval false on failure
+ */
+static inline boolean_t
+os_atomic64_cas(volatile uint64_t* var, uint64_t old_val, uint64_t new_val)
+{
+ return (atomic_cmpset_64(var, old_val, new_val));
+}
+
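A typical consumer of a compare-and-swap primitive retries until the swap lands. A hypothetical lock-free 64-bit maximum built on os_atomic64_cas(), purely illustrative:

/* Hypothetical sketch: atomically raise *var to at least val. */
static inline void
os_atomic64_max(volatile uint64_t *var, uint64_t val)
{
	uint64_t old_val;

	do {
		old_val = *var;
		if (old_val >= val)
			return;		/* already large enough */
	} while (!os_atomic64_cas(var, old_val, val));
}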
#define ASSERT(cond) {\
if (!(cond)) { \
printf("Assertion failed at file %s line %d\n",__FILE__,__LINE__); \
} \
}
+/* Atomic */
+typedef volatile uint64_t OS_ATOMIC64_T;
+#define OS_ATOMIC64_READ(p) atomic_load_acq_64(p)
+#define OS_ATOMIC64_INIT(p,val) atomic_store_rel_64(p, val)
+
+/* 64-bit atomic increment and decrement operations on the value in the
+ * pointer; each returns the updated value. */
+#define OS_ATOMIC64_DEC(p) (atomic_fetchadd_64(p, -1) - 1)
+#define OS_ATOMIC64_INC(p) (atomic_fetchadd_64(p, 1) + 1)
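Because atomic_fetchadd_64() returns the value prior to the addition, the +1/-1 adjustments make both macros hand back the updated value, matching source-level ++/--. A hypothetical usage:

/* Hypothetical wrapper showing the return-value semantics. */
static inline uint64_t
track_new_io(OS_ATOMIC64_T *pending)
{
	return (OS_ATOMIC64_INC(pending));	/* yields the new count */
}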
+
+
#define PQI_MAX_MSIX 64 /* vectors */
#define PQI_MSI_CTX_SIZE sizeof(pqi_intr_ctx)+1
#define IS_POLLING_REQUIRED(softs) if (cold) {\
@@ -742,10 +879,13 @@ typedef struct PCI_ACC_HANDLE {
#define LEGACY_SIS_IQN_H 0xd4 /* inbound queue native mode (high)*/
#define LEGACY_SIS_MAILBOX 0x7fc60 /* mailbox (20 bytes) */
#define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */
+#define LEGACY_SIS_SRCV_OFFSET_MAILBOX_7 0x101C /* mailbox 7 register offset */
+
#define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */
#define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */
+
/*
* PQI Register definitions for the smartraid adapters
*/
@@ -773,8 +913,9 @@ typedef struct PCI_ACC_HANDLE {
#define OS_BUSYWAIT(x) DELAY(x)
#define OS_SLEEP(timeout) \
DELAY(timeout);
-
-#define OS_HOST_WELLNESS_TIMEOUT (24 * 3600)
+
+/* TMF request timeout is 600 seconds */
+#define OS_TMF_TIMEOUT_SEC (10 * 60)
#define LE_16(x) htole16(x)
#define LE_32(x) htole32(x)
@@ -786,6 +927,7 @@ typedef struct PCI_ACC_HANDLE {
#define PQI_HWIF_SRCV 0
#define PQI_HWIF_UNKNOWN -1
+
#define SMART_STATE_SUSPEND (1<<0)
#define SMART_STATE_UNUSED0 (1<<1)
#define SMART_STATE_INTERRUPTS_ON (1<<2)
@@ -797,6 +939,7 @@ typedef struct PCI_ACC_HANDLE {
#define PQI_SIM_REGISTERED (1<<2)
#define PQI_MTX_INIT (1<<3)
+
#define PQI_CMD_MAPPED (1<<2)
/* Interrupt context to get oq_id */
@@ -834,35 +977,48 @@ typedef struct OS_SPECIFIC {
struct cam_path *path;
struct task event_task;
struct cdev *cdev;
- struct callout wellness_periodic; /* periodic event handling */
- struct callout heartbeat_timeout_id; /* heart beat event handling */
- eventhandler_tag eh;
+ struct callout wellness_periodic; /* periodic event handling */
+ struct callout heartbeat_timeout_id; /* heart beat event handling */
} OS_SPECIFIC_T;
typedef bus_addr_t dma_addr_t;
-/* Atomic */
-typedef volatile uint64_t OS_ATOMIC64_T;
-#define OS_ATOMIC64_SET(_softs, target, val) atomic_set_long(&(_softs)->target, val)
-#define OS_ATOMIC64_READ(_softs, target) atomic_load_acq_64(&(_softs)->target)
-#define OS_ATOMIC64_INC(_softs, target) atomic_add_64(&(_softs)->target, 1)
/* Register access macros */
#define PCI_MEM_GET32( _softs, _absaddr, _offset ) \
- bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset)
-
-#define PCI_MEM_GET64( _softs, _absaddr, _offset ) \
- bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset)
+ bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
+
+
+#if defined(__i386__)
+#define PCI_MEM_GET64( _softs, _absaddr, _offset ) ({ \
+ (uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset) + \
+ ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \
+ })
+#else
+#define PCI_MEM_GET64(_softs, _absaddr, _offset ) \
+ bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset)
+#endif
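i386 lacks a 64-bit bus_space_read_8(), so the macro synthesizes the read from two 32-bit accesses; note the composite read is not atomic with respect to the device. The same logic as a function, a sketch under the same assumptions:

static inline uint64_t
pqi_mem_get64_i386(pqisrc_softstate_t *softs, uint32_t offset)
{
	uint64_t lo, hi;

	lo = bus_space_read_4(softs->pci_mem_handle.pqi_btag,
	    softs->pci_mem_handle.pqi_bhandle, offset);
	hi = bus_space_read_4(softs->pci_mem_handle.pqi_btag,
	    softs->pci_mem_handle.pqi_bhandle, offset + 4);
	return (lo | (hi << 32));
}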
#define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \
- bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+#if defined(__i386__)
#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
- bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
- _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+	do {								\
+		bus_space_write_4(_softs->pci_mem_handle.pqi_btag,	\
+			_softs->pci_mem_handle.pqi_bhandle, _offset, _val); \
+		bus_space_write_4(_softs->pci_mem_handle.pqi_btag,	\
+			_softs->pci_mem_handle.pqi_bhandle, _offset + 4, (_val) >> 32); \
+	} while (0)
+#else
+#define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \
+ bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \
+ _softs->pci_mem_handle.pqi_bhandle, _offset, _val)
+#endif
+
#define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \
bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\
@@ -872,7 +1028,7 @@ typedef volatile uint64_t OS_ATOMIC64_T;
typedef struct mtx OS_LOCK_T;
typedef struct sema OS_SEMA_LOCK_T;
-#define OS_PQILOCK_T OS_LOCK_T
+#define OS_PQILOCK_T OS_LOCK_T
#define OS_ACQUIRE_SPINLOCK(_lock) mtx_lock_spin(_lock)
#define OS_RELEASE_SPINLOCK(_lock) mtx_unlock_spin(_lock)
@@ -883,8 +1039,10 @@ typedef struct sema OS_SEMA_LOCK_T;
#define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock)
#define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock)
-#define OS_GET_CDBP(rcb) ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
+#define OS_GET_CDBP(rcb) \
+ ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes)
#define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len)
+#define IS_OS_SCSICMD(rcb) (rcb && !rcb->tm_req && rcb->cm_ccb)
#define OS_GET_IO_QINDEX(softs,rcb) curcpu % softs->num_op_obq
#define OS_GET_IO_RESP_QID(softs,rcb) (softs->op_ob_q[(OS_GET_IO_QINDEX(softs,rcb))].q_id)
@@ -893,7 +1051,9 @@ typedef struct sema OS_SEMA_LOCK_T;
#define OS_GET_TMF_REQ_QINDEX OS_GET_IO_REQ_QINDEX
/* check request type */
-#define is_internal_req(rcb) (!(rcb)->cm_ccb)
+#define is_internal_req(rcb) (!(rcb->cm_ccb))
+
+#define os_io_memcpy(dest, src, len) memcpy(dest, src, len)
/* sg elements addr, len, flags */
#define OS_GET_IO_SG_COUNT(rcb) rcb->nseg
@@ -910,6 +1070,10 @@ typedef struct sema OS_SEMA_LOCK_T;
#define SCMD_READ_16 READ_16
#define SCMD_WRITE_16 WRITE_16
+/* FreeBSD status macros */
+#define BSD_SUCCESS 0
+
+
/* Debug facility */
#define PQISRC_LOG_LEVEL 0x60
@@ -924,6 +1088,8 @@ static int logging_level = PQISRC_LOG_LEVEL;
#define PQISRC_FLAGS_DISC 0x00000010
#define PQISRC_FLAGS_WARN 0x00000020
#define PQISRC_FLAGS_ERROR 0x00000040
+#define PQISRC_FLAGS_NOTE 0x00000080
+
#define DBG_INIT(fmt,args...) \
do { \
@@ -994,4 +1160,11 @@ static int logging_level = PQISRC_LOG_LEVEL;
} \
}while(0);
-#endif // _PQI_DEFINES_H
+#define DBG_NOTE(fmt,args...) \
+ do { \
+ if (logging_level & PQISRC_FLAGS_NOTE) { \
+ printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \
+ } \
+ }while(0);
+
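Note that the default PQISRC_LOG_LEVEL of 0x60 enables only the WARN (0x20) and ERROR (0x40) bits, so DBG_NOTE output stays silent unless the NOTE bit is added to the mask; a hypothetical compile-time override:

/* Hypothetical override enabling NOTE-level messages as well. */
#undef PQISRC_LOG_LEVEL
#define PQISRC_LOG_LEVEL \
	(PQISRC_FLAGS_WARN | PQISRC_FLAGS_ERROR | PQISRC_FLAGS_NOTE)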
+#endif /* _PQI_DEFINES_H */
diff --git a/sys/dev/smartpqi/smartpqi_discovery.c b/sys/dev/smartpqi/smartpqi_discovery.c
index f7d4b18bb154..cb08e5a09823 100644
--- a/sys/dev/smartpqi/smartpqi_discovery.c
+++ b/sys/dev/smartpqi/smartpqi_discovery.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +27,12 @@
#include "smartpqi_includes.h"
+#define MAX_RETRIES 3
+#define PQISRC_INQUIRY_TIMEOUT 30
+
/* Validate the scsi sense response code */
-static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
+static inline boolean_t
+pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
{
DBG_FUNC("IN\n");
@@ -41,8 +44,11 @@ static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *
return (sshdr->response_code & 0x70) == 0x70;
}
-/* Initialize target ID pool for HBA/PDs */
-void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
+/*
+ * Initialize target ID pool for HBA/PDs.
+ */
+void
+pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
{
int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
@@ -52,29 +58,33 @@ void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
softs->tid_pool.index = i - 1;
}
-int pqisrc_alloc_tid(pqisrc_softstate_t *softs)
+int
+pqisrc_alloc_tid(pqisrc_softstate_t *softs)
{
+
if(softs->tid_pool.index <= -1) {
DBG_ERR("Target ID exhausted\n");
return INVALID_ELEM;
}
-
+
return softs->tid_pool.tid[softs->tid_pool.index--];
}
-void pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
+void
+pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
{
- if(softs->tid_pool.index >= PQI_MAX_PHYSICALS) {
- DBG_ERR("Target ID queue is full\n");
- return;
- }
+ if(softs->tid_pool.index >= (PQI_MAX_PHYSICALS - 1)) {
+ DBG_ERR("Target ID queue is full\n");
+ return;
+ }
softs->tid_pool.index++;
softs->tid_pool.tid[softs->tid_pool.index] = tid;
}
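The pool is a simple stack: allocation pops an ID and free pushes it back. A hypothetical usage mirroring the discovery path:

/* Hypothetical sketch: reserve a target ID for a physical device and
 * return it when the device departs. */
static void
example_tid_lifecycle(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	int tid = pqisrc_alloc_tid(softs);

	if (tid == INVALID_ELEM)
		return;		/* pool exhausted */
	pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
	/* ... device lifetime ... */
	pqisrc_free_tid(softs, tid);
}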
/* Update scsi sense info to a local buffer*/
-boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
+boolean_t
+pqisrc_update_scsi_sense(const uint8_t *buff, int len,
struct sense_header_scsi *header)
{
@@ -122,7 +132,8 @@ boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
/*
* Function used to build the internal raid request and analyze the response
*/
-int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
+int
+pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
raid_path_error_info_elem_t *error_info)
{
@@ -160,6 +171,7 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
sgd->addr = device_mem.dma_addr;
sgd->len = datasize;
sgd->flags = SG_FLAG_LAST;
+
}
/* Build raid path request */
@@ -183,6 +195,9 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
cdb[2] = (uint8_t)vpd_page;
}
cdb[4] = (uint8_t)datasize;
+ if (softs->timeout_in_passthrough) {
+ request->timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
+ }
break;
case SA_REPORT_LOG:
case SA_REPORT_PHYS:
@@ -195,6 +210,13 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
cdb[8] = (uint8_t)((datasize) >> 8);
cdb[9] = (uint8_t)datasize;
break;
+ case PQI_LOG_EXT_QUEUE_ENABLE:
+ request->data_direction = SOP_DATA_DIR_TO_DEVICE;
+ cdb[0] = SA_REPORT_LOG;
+ cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
+ cdb[8] = (uint8_t)((datasize) >> 8);
+ cdb[9] = (uint8_t)datasize;
+ break;
case TEST_UNIT_READY:
request->data_direction = SOP_DATA_DIR_NONE;
break;
@@ -235,10 +257,11 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
cdb[6] = cmd;
cdb[7] = (uint8_t)((datasize) << 8);
cdb[8] = (uint8_t)((datasize) >> 8);
- break;
+ break;
default:
DBG_ERR("unknown command 0x%x", cmd);
- break;
+ ret = PQI_STATUS_FAILURE;
+ return ret;
}
tag = pqisrc_get_tag(&softs->taglist);
@@ -265,7 +288,7 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
goto err_out;
}
- ret = pqisrc_wait_on_condition(softs, rcb);
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Internal RAID request timed out: cmd : 0x%c\n", cmd);
goto err_out;
@@ -281,7 +304,7 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
ret = rcb->status;
if (ret) {
if(error_info) {
- memcpy(error_info,
+ memcpy(error_info,
rcb->error_info,
sizeof(*error_info));
@@ -290,9 +313,9 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
ret = PQI_STATUS_SUCCESS;
}
else{
- DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
- "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
- BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
+ DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
+ "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
+ BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
cmd, ret);
ret = PQI_STATUS_FAILURE;
}
@@ -310,8 +333,8 @@ int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t
return ret;
err_out:
- DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
- BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
+ DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
+ BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
cmd, ret);
os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
@@ -323,7 +346,8 @@ err_notag:
}
/* common function used to send report physical and logical luns cmnds*/
-static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+static int
+pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
void *buff, size_t buf_len)
{
int ret;
@@ -332,7 +356,7 @@ static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff,
+ ret = pqisrc_build_send_raid_request(softs, &request, buff,
buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
DBG_FUNC("OUT\n");
@@ -341,7 +365,8 @@ static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
}
/* subroutine used to get physical and logical luns of the device */
-static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+int
+pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
reportlun_data_ext_t **buff, size_t *data_length)
{
int ret;
@@ -372,7 +397,7 @@ retry:
DBG_ERR("failed to allocate memory for lun_data\n");
return PQI_STATUS_FAILURE;
}
-
+
if (list_len == 0) {
DBG_DISC("list_len is 0\n");
memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
@@ -406,11 +431,78 @@ error:
}
/*
+ * Function used to grab queue depth ext lun data for logical devices
+ */
+static int
+pqisrc_get_queue_lun_list(pqisrc_softstate_t *softs, uint8_t cmd,
+ reportlun_queue_depth_data_t **buff, size_t *data_length)
+{
+ int ret;
+ size_t list_len;
+ size_t data_len;
+ size_t new_lun_list_length;
+ reportlun_queue_depth_data_t *lun_data;
+ reportlun_header_t report_lun_header;
+
+ DBG_FUNC("IN\n");
+
+ ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
+ sizeof(report_lun_header));
+
+ if (ret) {
+ DBG_ERR("failed return code: %d\n", ret);
+ return ret;
+ }
+ list_len = BE_32(report_lun_header.list_length);
+retry:
+ data_len = sizeof(reportlun_header_t) + list_len;
+ *data_length = data_len;
+ lun_data = os_mem_alloc(softs, data_len);
+
+ if (!lun_data) {
+ DBG_ERR("failed to allocate memory for lun_data\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (list_len == 0) {
+ DBG_INFO("list_len is 0\n");
+ memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
+ goto out;
+ }
+ ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
+
+ if (ret) {
+ DBG_ERR("error\n");
+ goto error;
+ }
+ new_lun_list_length = BE_32(lun_data->header.list_length);
+
+ if (new_lun_list_length > list_len) {
+ list_len = new_lun_list_length;
+ os_mem_free(softs, (void *)lun_data, data_len);
+ goto retry;
+ }
+
+out:
+ *buff = lun_data;
+ DBG_FUNC("OUT\n");
+ return 0;
+
+error:
+ os_mem_free(softs, (void *)lun_data, data_len);
+ DBG_ERR("FAILED\n");
+ return ret;
+}
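To summarize the allocate-and-retry protocol used above:

/*
 * The list length is probed with a header-only REPORT LUNS first, a
 * buffer of exactly that size is allocated, and the full command is
 * re-issued; if the list grew between the two commands (e.g. a
 * hot-plug racing the scan), the buffer is freed and the sequence
 * restarts with the larger length.
 */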
+
+/*
* Function used to get physical and logical device list
*/
-static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
+static int
+pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
reportlun_data_ext_t **physical_dev_list,
- reportlun_data_ext_t **logical_dev_list,
+ reportlun_data_ext_t **logical_dev_list,
+ reportlun_queue_depth_data_t **queue_dev_list,
+ size_t *queue_data_length,
size_t *phys_data_length,
size_t *log_data_length)
{
@@ -436,6 +528,12 @@ static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
return ret;
}
+ ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
+ if (ret) {
+ DBG_ERR("report logical LUNs failed");
+ return ret;
+ }
+
logdev_data = *logical_dev_list;
if (logdev_data) {
@@ -477,7 +575,8 @@ static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
}
/* Subroutine used to set Bus-Target-Lun for the requested device */
-static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
+static inline void
+pqisrc_set_btl(pqi_scsi_dev_t *device,
int bus, int target, int lun)
{
DBG_FUNC("IN\n");
@@ -489,7 +588,8 @@ static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
DBG_FUNC("OUT\n");
}
-inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
+inline boolean_t
+pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
{
return device->is_external_raid_device;
}
@@ -500,7 +600,8 @@ static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
}
/* Function used to assign Bus-Target-Lun for the requested device */
-static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
+static void
+pqisrc_assign_btl(pqi_scsi_dev_t *device)
{
uint8_t *scsi3addr;
uint32_t lunid;
@@ -514,7 +615,7 @@ static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
if (pqisrc_is_hba_lunid(scsi3addr)) {
/* The specified device is the controller. */
- pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
+ pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff) + 1);
device->target_lun_valid = true;
return;
}
@@ -527,8 +628,8 @@ static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
lun = lunid & 0xff;
} else {
bus = PQI_RAID_VOLUME_BUS;
- lun = 0;
- target = lunid & 0x3fff;
+ lun = (lunid & 0x3fff) + 1;
+ target = 0;
}
pqisrc_set_btl(device, bus, target, lun);
device->target_lun_valid = true;
@@ -539,7 +640,8 @@ static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
}
/* Build and send the internal INQUIRY command to particular device */
-static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
+int
+pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
{
int ret = PQI_STATUS_SUCCESS;
@@ -549,15 +651,17 @@ static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
DBG_FUNC("IN\n");
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
SA_INQUIRY, vpd_page, scsi3addr, &error_info);
DBG_FUNC("OUT\n");
return ret;
}
+#if 0
/* Function used to parse the sense information from response */
-static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
+static void
+pqisrc_fetch_sense_info(const uint8_t *sense_data,
unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
{
struct sense_header_scsi header;
@@ -578,132 +682,65 @@ static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
DBG_FUNC("OUT\n");
}
+#endif
-/* Function used to validate volume offline status */
-static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
- uint8_t *scsi3addr)
+/* Determine the logical volume status from the VPD buffer. */
+static void
+pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
- int ret = PQI_STATUS_SUCCESS;
+ int ret;
uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
- uint8_t size;
- uint8_t *buff = NULL;
+ uint8_t vpd_size = sizeof(vpd_volume_status);
+ uint8_t offline = true;
+ size_t page_length;
+ vpd_volume_status *vpd;
DBG_FUNC("IN\n");
- buff = os_mem_alloc(softs, 64);
- if (!buff)
- return PQI_STATUS_FAILURE;
+ vpd = os_mem_alloc(softs, vpd_size);
+ if (vpd == NULL)
+ goto out;
/* Get the size of the VPD return buff. */
- ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
- buff, SCSI_VPD_HEADER_LENGTH);
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
+ (uint8_t *)vpd, vpd_size);
- if (ret)
+ if (ret) {
+ DBG_WARN("Inquiry returned failed status\n");
goto out;
+ }
- size = buff[3];
+ if (vpd->page_code != SA_VPD_LV_STATUS) {
+ DBG_WARN("Returned invalid buffer\n");
+ goto out;
+ }
- /* Now get the whole VPD buff. */
- ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
- buff, size + SCSI_VPD_HEADER_LENGTH);
- if (ret)
+ page_length = offsetof(vpd_volume_status, volume_status) + vpd->page_length;
+ if (page_length < vpd_size)
goto out;
- status = buff[4];
+ status = vpd->volume_status;
+	offline = (vpd->flags & SA_LV_FLAGS_NO_HOST_IO) != 0;
out:
- os_mem_free(softs, (char *)buff, 64);
- DBG_FUNC("OUT\n");
+ device->volume_offline = offline;
+ device->volume_status = status;
- return status;
-}
-
-/* Determine offline status of a volume. Returns appropriate SA_LV_* status.*/
-static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
- uint8_t *scsi3addr)
-{
- int ret = PQI_STATUS_SUCCESS;
- uint8_t *sense_data;
- unsigned sense_data_len;
- uint8_t sense_key;
- uint8_t asc;
- uint8_t ascq;
- uint8_t off_status;
- uint8_t scsi_status;
- pqisrc_raid_req_t request;
- raid_path_error_info_elem_t error_info;
-
- DBG_FUNC("IN\n");
-
- memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, NULL, 0,
- TEST_UNIT_READY, 0, scsi3addr, &error_info);
-
- if (ret)
- goto error;
- sense_data = error_info.data;
- sense_data_len = LE_16(error_info.sense_data_len);
-
- if (sense_data_len > sizeof(error_info.data))
- sense_data_len = sizeof(error_info.data);
-
- pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
- &ascq);
-
- scsi_status = error_info.status;
-
- /* scsi status: "CHECK CONDN" / SK: "not ready" ? */
- if (scsi_status != 2 ||
- sense_key != 2 ||
- asc != ASC_LUN_NOT_READY) {
- return SA_LV_OK;
- }
-
- /* Determine the reason for not ready state. */
- off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);
-
- DBG_DISC("offline_status 0x%x\n", off_status);
-
- /* Keep volume offline in certain cases. */
- switch (off_status) {
- case SA_LV_UNDERGOING_ERASE:
- case SA_LV_NOT_AVAILABLE:
- case SA_LV_UNDERGOING_RPI:
- case SA_LV_PENDING_RPI:
- case SA_LV_ENCRYPTED_NO_KEY:
- case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
- case SA_LV_UNDERGOING_ENCRYPTION:
- case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
- case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
- return off_status;
- case SA_LV_STATUS_VPD_UNSUPPORTED:
- /*
- * If the VPD status page isn't available,
- * use ASC/ASCQ to determine state.
- */
- if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
- ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
- return off_status;
- break;
- }
+ os_mem_free(softs, (char *)vpd, vpd_size);
DBG_FUNC("OUT\n");
- return SA_LV_OK;
-
-error:
- return SA_LV_STATUS_VPD_UNSUPPORTED;
+ return;
}
/* Validate the RAID map parameters */
-static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
+static int
+pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
{
char *error_msg;
uint32_t raidmap_size;
uint32_t r5or6_blocks_per_row;
- unsigned phys_dev_num;
- unsigned num_raidmap_entries;
DBG_FUNC("IN\n");
@@ -713,21 +750,11 @@ static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
goto error;
}
- if (raidmap_size > sizeof(*raid_map)) {
- error_msg = "RAID map too large\n";
- goto error;
- }
-
+#if 0
phys_dev_num = LE_16(raid_map->layout_map_count) *
- (LE_16(raid_map->data_disks_per_row) +
- LE_16(raid_map->metadata_disks_per_row));
- num_raidmap_entries = phys_dev_num *
- LE_16(raid_map->row_cnt);
-
- if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
- error_msg = "invalid number of map entries in RAID map\n";
- goto error;
- }
+ (LE_16(raid_map->data_disks_per_row) +
+ LE_16(raid_map->metadata_disks_per_row));
+#endif
if (device->raid_level == SA_RAID_1) {
if (LE_16(raid_map->layout_map_count) != 2) {
@@ -736,7 +763,7 @@ static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
}
} else if (device->raid_level == SA_RAID_ADM) {
if (LE_16(raid_map->layout_map_count) != 3) {
- error_msg = "invalid RAID-1(ADM) map\n";
+ error_msg = "invalid RAID-1(triple) map\n";
goto error;
}
} else if ((device->raid_level == SA_RAID_5 ||
@@ -757,15 +784,17 @@ static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
return 0;
error:
- DBG_ERR("%s\n", error_msg);
+ DBG_NOTE("%s\n", error_msg);
return PQI_STATUS_FAILURE;
}
/* Get device raidmap for the requested device */
-static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static int
+pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
+ int raidmap_size;
+
pqisrc_raid_req_t request;
pqisrc_raid_map_t *raid_map;
@@ -776,7 +805,7 @@ static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
return PQI_STATUS_FAILURE;
memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
+ ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
if (ret) {
@@ -784,9 +813,33 @@ static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
goto err_out;
}
+ raidmap_size = LE_32(raid_map->structure_size);
+ if (raidmap_size > sizeof(*raid_map)) {
+ DBG_NOTE("Raid map is larger than 1024 entries, request once again");
+ os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
+
+ raid_map = os_mem_alloc(softs, raidmap_size);
+ if (!raid_map)
+ return PQI_STATUS_FAILURE;
+ memset(&request, 0, sizeof(request));
+
+ ret = pqisrc_build_send_raid_request(softs, &request, raid_map, raidmap_size,
+ SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
+ if (ret) {
+ DBG_ERR("error in build send raid req ret=%d\n", ret);
+ goto err_out;
+ }
+
+ if(LE_32(raid_map->structure_size) != raidmap_size) {
+ DBG_WARN("Expected raid map size %d bytes and got %d bytes\n",
+				raidmap_size, LE_32(raid_map->structure_size));
+ goto err_out;
+ }
+ }
+
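In short, the refetch above covers RAID maps exceeding the default structure size:

/*
 * Maps with more than the default number of entries report a larger
 * structure_size on the first read; the buffer is then reallocated to
 * the firmware-reported size, the map is fetched again, and the size
 * is re-verified before validation proceeds.
 */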
ret = pqisrc_raid_map_validation(softs, device, raid_map);
if (ret) {
- DBG_ERR("error in raid map validation ret=%d\n", ret);
+ DBG_NOTE("error in raid map validation ret=%d\n", ret);
goto err_out;
}
@@ -801,7 +854,8 @@ err_out:
}
/* Get device ioaccel_status to validate the type of device */
-static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
+static void
+pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
@@ -832,7 +886,7 @@ static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
device->offload_enabled_pending = false;
}
- DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
+ DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
device->offload_config, device->offload_enabled_pending);
err_out:
@@ -841,8 +895,8 @@ err_out:
}
/* Get RAID level of requested device */
-static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static void
+pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
uint8_t raid_level;
uint8_t *buff;
@@ -870,11 +924,12 @@ static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
}
/* Parse the inquiry response and determine the type of device */
-static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static int
+pqisrc_get_dev_data(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
int ret = PQI_STATUS_SUCCESS;
uint8_t *inq_buff;
+ int retry = MAX_RETRIES;
DBG_FUNC("IN\n");
@@ -882,10 +937,15 @@ static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
if (!inq_buff)
return PQI_STATUS_FAILURE;
- /* Send an inquiry to the device to see what it is. */
- ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
- OBDR_TAPE_INQ_SIZE);
- if (ret)
+ while(retry--) {
+ /* Send an inquiry to the device to see what it is. */
+ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
+ OBDR_TAPE_INQ_SIZE);
+ if (!ret)
+ break;
+ DBG_WARN("Retrying inquiry !!!\n");
+ }
+	if (ret)
goto err_out;
pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
@@ -895,20 +955,18 @@ static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
sizeof(device->vendor));
memcpy(device->model, &inq_buff[16],
sizeof(device->model));
- DBG_DISC("DEV_TYPE: %x VENDOR: %s MODEL: %s\n", device->devtype, device->vendor, device->model);
+ DBG_DISC("DEV_TYPE: %x VENDOR: %.8s MODEL: %.16s\n", device->devtype, device->vendor, device->model);
if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
if (pqisrc_is_external_raid_device(device)) {
device->raid_level = SA_RAID_UNKNOWN;
device->volume_status = SA_LV_OK;
device->volume_offline = false;
- }
+ }
else {
pqisrc_get_dev_raid_level(softs, device);
pqisrc_get_dev_ioaccel_status(softs, device);
- device->volume_status = pqisrc_get_dev_vol_status(softs,
- device->scsi3addr);
- device->volume_offline = device->volume_status != SA_LV_OK;
+ pqisrc_get_dev_vol_status(softs, device);
}
}
@@ -930,16 +988,16 @@ err_out:
* BMIC (Basic Management And Interface Commands) command
* to get the controller identify params
*/
-static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
- bmic_ident_ctrl_t *buff)
+static int
+pqisrc_identify_ctrl(pqisrc_softstate_t *softs, bmic_ident_ctrl_t *buff)
{
int ret = PQI_STATUS_SUCCESS;
pqisrc_raid_req_t request;
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
- ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
DBG_FUNC("OUT\n");
@@ -947,7 +1005,8 @@ static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
}
/* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
-int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
+int
+pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
bmic_ident_ctrl_t *identify_ctrl;
@@ -965,7 +1024,7 @@ int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
ret = pqisrc_identify_ctrl(softs, identify_ctrl);
if (ret)
goto out;
-
+
softs->fw_build_number = identify_ctrl->fw_build_number;
memcpy(softs->fw_version, identify_ctrl->fw_version,
sizeof(identify_ctrl->fw_version));
@@ -976,13 +1035,14 @@ int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
"-%u", identify_ctrl->fw_build_number);
out:
os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
- DBG_INIT("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
+ DBG_NOTE("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
DBG_FUNC("OUT\n");
return ret;
}
/* BMIC command to determine scsi device identify params */
-static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
+static int
+pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device,
bmic_ident_physdev_t *buff,
int buf_len)
@@ -991,14 +1051,15 @@ static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
uint16_t bmic_device_index;
pqisrc_raid_req_t request;
+
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
+ memset(&request, 0, sizeof(request));
bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
request.cdb[2] = (uint8_t)bmic_device_index;
request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
- ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
+ ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
DBG_FUNC("OUT\n");
return ret;
@@ -1008,7 +1069,8 @@ static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
* Function used to get the scsi device information using one of BMIC
* BMIC_IDENTIFY_PHYSICAL_DEVICE
*/
-static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
+static void
+pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device,
bmic_ident_physdev_t *id_phys)
{
@@ -1041,10 +1103,11 @@ static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
DBG_FUNC("OUT\n");
}
+
/* Function used to find the entry of the device in a list */
-static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device_to_find,
- pqi_scsi_dev_t **same_device)
+static device_status_t
+pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device)
{
pqi_scsi_dev_t *device;
int i,j;
@@ -1071,10 +1134,11 @@ static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
return DEVICE_NOT_FOUND;
}
+
/* Update the newly added devices as existed device */
-static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device_exist,
- pqi_scsi_dev_t *new_device)
+static void
+pqisrc_exist_device_update(pqisrc_softstate_t *softs,
+ pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device)
{
DBG_FUNC("IN\n");
device_exist->expose_device = new_device->expose_device;
@@ -1085,6 +1149,13 @@ static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
device_exist->is_physical_device = new_device->is_physical_device;
device_exist->is_external_raid_device =
new_device->is_external_raid_device;
+
+ if ((device_exist->volume_status == SA_LV_QUEUED_FOR_EXPANSION ||
+ device_exist->volume_status == SA_LV_UNDERGOING_EXPANSION) &&
+ new_device->volume_status == SA_LV_OK) {
+ device_exist->scsi_rescan = true;
+ }
+
device_exist->sas_address = new_device->sas_address;
device_exist->raid_level = new_device->raid_level;
device_exist->queue_depth = new_device->queue_depth;
@@ -1098,7 +1169,6 @@ static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
memcpy(device_exist->phys_connector, new_device->phys_connector,
sizeof(device_exist->phys_connector));
device_exist->offload_config = new_device->offload_config;
- device_exist->offload_enabled = false;
device_exist->offload_enabled_pending =
new_device->offload_enabled_pending;
device_exist->offload_to_mirror = 0;
@@ -1113,12 +1183,13 @@ static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
}
/* Validate the ioaccel_handle for a newly added device */
-static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
+static pqi_scsi_dev_t *
+pqisrc_identify_device_via_ioaccel(
pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
{
pqi_scsi_dev_t *device;
int i,j;
- DBG_FUNC("IN\n");
+ DBG_FUNC("IN\n");
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
if(softs->device_list[i][j] == NULL)
@@ -1138,7 +1209,8 @@ static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
}
/* Get the scsi device queue depth */
-static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
+static void
+pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
{
unsigned i;
unsigned phys_dev_num;
@@ -1202,16 +1274,17 @@ static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
}
/* Function used to add a scsi device to OS scsi subsystem */
-static int pqisrc_add_device(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static int
+pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
- DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
device->invalid = false;
if(device->expose_device) {
+ pqisrc_init_device_active_io(softs, device);
/* TBD: Call OS upper layer function to add the device entry */
os_add_device(softs,device);
}
@@ -1221,15 +1294,26 @@ static int pqisrc_add_device(pqisrc_softstate_t *softs,
}
/* Function used to remove a scsi device from OS scsi subsystem */
-void pqisrc_remove_device(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+void
+pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
- DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
+ DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
- /* TBD: Call OS upper layer function to remove the device entry */
device->invalid = true;
+ if (device->expose_device == false) {
+		/* Masked physical devices are not exposed to the storage
+		 * stack, so free the masked device resources such as
+		 * device memory, target ID, etc., here.
+		 */
+		DBG_NOTE("Deallocated masked device resources.\n");
+ pqisrc_free_device(softs,device);
+ return;
+ }
+	/* Wait for the device's outstanding I/Os to complete */
+ pqisrc_wait_for_device_commands_to_complete(softs, device);
+ /* Call OS upper layer function to remove the exposed device entry */
os_remove_device(softs,device);
DBG_FUNC("OUT\n");
}
@@ -1238,8 +1322,8 @@ void pqisrc_remove_device(pqisrc_softstate_t *softs,
* When exposing new device to OS fails then adjst list according to the
* mid scsi list
*/
-static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static void
+pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
@@ -1257,8 +1341,8 @@ static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
}
/* Debug routine used to display the RAID volume status of the device */
-static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
- pqi_scsi_dev_t *device)
+static void
+pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
char *status;
@@ -1303,6 +1387,29 @@ static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
case SA_LV_STATUS_VPD_UNSUPPORTED:
status = "Volume status is not available through vital product data pages.";
break;
+ case SA_LV_UNDERGOING_EXPANSION:
+ status = "Volume undergoing expansion";
+ break;
+ case SA_LV_QUEUED_FOR_EXPANSION:
+ status = "Volume queued for expansion";
+ case SA_LV_EJECTED:
+ status = "Volume ejected";
+ break;
+ case SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
+ status = "Volume has wrong physical drive replaced";
+ break;
+ case SA_LV_DISABLED_SCSI_ID_CONFLICT:
+ status = "Volume disabled scsi id conflict";
+ break;
+ case SA_LV_HARDWARE_HAS_OVERHEATED:
+ status = "Volume hardware has over heated";
+ break;
+ case SA_LV_HARDWARE_OVERHEATING:
+ status = "Volume hardware over heating";
+ break;
+ case SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
+ status = "Volume physical drive connection problem";
+ break;
default:
status = "Volume is in an unknown state.";
break;
@@ -1313,7 +1420,8 @@ static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
DBG_FUNC("OUT\n");
}
-void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+void
+pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
DBG_FUNC("IN\n");
if (!device)
@@ -1327,20 +1435,41 @@ void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
}
/* OS should call this function to free the scsi device */
-void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
+void
+pqisrc_free_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
+ rcb_t *rcb;
+ int i;
- OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
- if (!pqisrc_is_logical_device(device)) {
- pqisrc_free_tid(softs,device->target);
+	/* Clear the "device" field in the rcb.
+	 * A response arriving after device removal must not access this field.
+	 */
+ for(i = 1; i <= softs->max_outstanding_io; i++)
+ {
+ rcb = &softs->rcb[i];
+ if(rcb->dvp == device) {
+ DBG_WARN("Pending requests for the removing device\n");
+ rcb->dvp = NULL;
}
- pqisrc_device_mem_free(softs, device);
- OS_RELEASE_SPINLOCK(&softs->devlist_lock);
+ }
+
+ OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
+
+ if (!pqisrc_is_logical_device(device)) {
+ pqisrc_free_tid(softs,device->target);
+ }
+
+ softs->device_list[device->target][device->lun] = NULL;
+
+ pqisrc_device_mem_free(softs, device);
+
+ OS_RELEASE_SPINLOCK(&softs->devlist_lock);
}
/* Update the newly added devices to the device list */
-static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
+static void
+pqisrc_update_device_list(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *new_device_list[], int num_new_devices)
{
int ret;
@@ -1353,6 +1482,7 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
int nadded = 0, nremoved = 0;
int j;
int tid = 0;
+ boolean_t driver_queue_depth_flag = false;
DBG_FUNC("IN\n");
@@ -1421,7 +1551,7 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
continue;
if (device->volume_offline)
continue;
-
+
/* physical device */
if (!pqisrc_is_logical_device(device)) {
tid = pqisrc_alloc_tid(softs);
@@ -1429,7 +1559,16 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
}
- softs->device_list[device->target][device->lun] = device;
+			/* This is not expected. We may lose the reference to
+			 * the old device entry. If the target and LUN IDs are
+			 * the same, it should have been detected as an
+			 * existing device, not as a new one.
+			 */
+ if(softs->device_list[device->target][device->lun] != NULL) {
+ DBG_WARN("Overwriting T : %d L :%d\n",device->target,device->lun);
+ }
+
+ softs->device_list[device->target][device->lun] = device;
+
DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
device->bus,device->target,device->lun);
/* To prevent this entry from being freed later. */
@@ -1438,7 +1577,6 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
nadded++;
}
- pqisrc_update_log_dev_qdepth(softs);
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
@@ -1455,8 +1593,8 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
device = removed[i];
if (device == NULL)
continue;
- pqisrc_remove_device(softs, device);
pqisrc_display_device_info(softs, "removed", device);
+ pqisrc_remove_device(softs, device);
}
@@ -1474,8 +1612,20 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
device->advertised_queue_depth = device->queue_depth;
/* TBD: Call OS upper layer function to change device Q depth */
}
+ if (device->firmware_queue_depth_set == false)
+ driver_queue_depth_flag = true;
+ if (device->scsi_rescan)
+ os_rescan_target(softs, device);
}
}
+ /*
+	 * If the firmware queue depth is corrupt or unavailable,
+	 * fall back to the driver method of recalculating the
+	 * queue depth for all logical devices.
+ */
+ if (driver_queue_depth_flag)
+ pqisrc_update_log_dev_qdepth(softs);
+
for(i = 0; i < nadded; i++) {
device = added[i];
if (device->expose_device) {
@@ -1508,10 +1658,10 @@ static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
free_and_out:
if (added)
os_mem_free(softs, (char *)added,
- sizeof(*added) * PQI_MAX_DEVICES);
+ sizeof(*added) * PQI_MAX_DEVICES);
if (removed)
os_mem_free(softs, (char *)removed,
- sizeof(*removed) * PQI_MAX_DEVICES);
+ sizeof(*removed) * PQI_MAX_DEVICES);
DBG_FUNC("OUT\n");
}
@@ -1520,7 +1670,8 @@ free_and_out:
* Let the Adapter know about driver version using one of BMIC
* BMIC_WRITE_HOST_WELLNESS
*/
-int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
+int
+pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
{
int rval = PQI_STATUS_SUCCESS;
struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
@@ -1529,7 +1680,7 @@ int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
+ memset(&request, 0, sizeof(request));
data_length = sizeof(*host_wellness_driver_ver);
host_wellness_driver_ver = os_mem_alloc(softs, data_length);
@@ -1553,6 +1704,7 @@ int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
} else {
DBG_DISC("OS name length(%lu) is longer than buffer of driver_version\n",
strlen(softs->os_name));
+
}
host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
host_wellness_driver_ver->end_tag[0] = 'Z';
@@ -1567,11 +1719,12 @@ int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
return rval;
}
-/*
+/*
* Write current RTC time from host to the adapter using
* BMIC_WRITE_HOST_WELLNESS
*/
-int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
+int
+pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
{
int rval = PQI_STATUS_SUCCESS;
struct bmic_host_wellness_time *host_wellness_time;
@@ -1580,7 +1733,7 @@ int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
DBG_FUNC("IN\n");
- memset(&request, 0, sizeof(request));
+ memset(&request, 0, sizeof(request));
data_length = sizeof(*host_wellness_time);
host_wellness_time = os_mem_alloc(softs, data_length);
@@ -1595,7 +1748,7 @@ int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
host_wellness_time->start_tag[3] = '>';
host_wellness_time->time_tag[0] = 'T';
host_wellness_time->time_tag[1] = 'D';
- host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
+ host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
offsetof(struct bmic_host_wellness_time, century));
os_get_time(host_wellness_time);
@@ -1618,40 +1771,52 @@ int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
* Function used to perform a rescan of scsi devices
* for any config change events
*/
-int pqisrc_scan_devices(pqisrc_softstate_t *softs)
+int
+pqisrc_scan_devices(pqisrc_softstate_t *softs)
{
boolean_t is_physical_device;
int ret = PQI_STATUS_FAILURE;
int i;
int new_dev_cnt;
int phy_log_dev_cnt;
+ size_t queue_log_data_length;
uint8_t *scsi3addr;
+ uint8_t multiplier;
+ uint16_t qdepth;
uint32_t physical_cnt;
uint32_t logical_cnt;
+ uint32_t logical_queue_cnt;
uint32_t ndev_allocated = 0;
size_t phys_data_length, log_data_length;
reportlun_data_ext_t *physical_dev_list = NULL;
reportlun_data_ext_t *logical_dev_list = NULL;
reportlun_ext_entry_t *lun_ext_entry = NULL;
+ reportlun_queue_depth_data_t *logical_queue_dev_list = NULL;
bmic_ident_physdev_t *bmic_phy_info = NULL;
pqi_scsi_dev_t **new_device_list = NULL;
pqi_scsi_dev_t *device = NULL;
+
DBG_FUNC("IN\n");
ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
- &phys_data_length, &log_data_length);
+ &logical_queue_dev_list, &queue_log_data_length,
+ &phys_data_length, &log_data_length);
if (ret)
goto err_out;
- physical_cnt = BE_32(physical_dev_list->header.list_length)
+ physical_cnt = BE_32(physical_dev_list->header.list_length)
/ sizeof(physical_dev_list->lun_entries[0]);
logical_cnt = BE_32(logical_dev_list->header.list_length)
/ sizeof(logical_dev_list->lun_entries[0]);
- DBG_DISC("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
+ logical_queue_cnt = BE_32(logical_queue_dev_list->header.list_length)
+ / sizeof(logical_queue_dev_list->lun_entries[0]);
+
+
+ DBG_DISC("physical_cnt %d logical_cnt %d queue_cnt %d\n", physical_cnt, logical_cnt, logical_queue_cnt);
if (physical_cnt) {
bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
@@ -1685,6 +1850,7 @@ int pqisrc_scan_devices(pqisrc_softstate_t *softs)
ndev_allocated = phy_log_dev_cnt;
new_dev_cnt = 0;
for (i = 0; i < phy_log_dev_cnt; i++) {
+
if (i < physical_cnt) {
is_physical_device = true;
lun_ext_entry = &physical_dev_list->lun_entries[i];
@@ -1695,6 +1861,7 @@ int pqisrc_scan_devices(pqisrc_softstate_t *softs)
}
scsi3addr = lun_ext_entry->lunid;
+
/* Save the target sas adderess for external raid device */
if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
int target = lun_ext_entry->lunid[3] & 0x3f;
@@ -1711,10 +1878,34 @@ int pqisrc_scan_devices(pqisrc_softstate_t *softs)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->wwid = lun_ext_entry->wwid;
device->is_physical_device = is_physical_device;
- if (!is_physical_device)
+ if (!is_physical_device && logical_queue_cnt--) {
device->is_external_raid_device =
pqisrc_is_external_raid_addr(scsi3addr);
-
+				/* The multiplier scales the raw queue depth
+				 * value into the actual queue depth: multiply
+				 * by 256 when the multiplier is 1, and by 16
+				 * when it is 0. */
+ multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier;
+ qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth;
+ if (multiplier) {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = qdepth*256;
+ } else {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = qdepth*16;
+ }
+ if (device->queue_depth > softs->adapterQDepth) {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = softs->adapterQDepth;
+ }
+ if ((multiplier == 1) &&
+ (qdepth <= 0 || qdepth >= MAX_RAW_M256_QDEPTH))
+ device->firmware_queue_depth_set = false;
+ if ((multiplier == 0) &&
+ (qdepth <= 0 || qdepth >= MAX_RAW_M16_QDEPTH))
+ device->firmware_queue_depth_set = false;
+ }
+
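A worked example with hypothetical values makes the scaling concrete:

/* Hypothetical example: a logical LUN reporting queue_depth = 4 with
 * multiplier = 1 yields 4 * 256 = 1024; if softs->adapterQDepth is
 * smaller, the depth is clamped to the adapter limit. Raw values
 * outside the sane range (qdepth <= 0, or >= MAX_RAW_M256_QDEPTH /
 * MAX_RAW_M16_QDEPTH) clear firmware_queue_depth_set so the driver
 * recomputes the depth later. */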
/* Get device type, vendor, model, device ID. */
ret = pqisrc_get_dev_data(softs, device);
@@ -1724,6 +1915,12 @@ int pqisrc_scan_devices(pqisrc_softstate_t *softs)
DBG_DISC("INQUIRY FAILED \n");
continue;
}
+		/* Set the controller queue depth to what
+		 * it was from the SCSI midlayer */
+ if (device->devtype == RAID_DEVICE) {
+ device->firmware_queue_depth_set = true;
+ device->queue_depth = softs->adapterQDepth;
+ }
pqisrc_assign_btl(device);
/*
@@ -1769,7 +1966,7 @@ int pqisrc_scan_devices(pqisrc_softstate_t *softs)
device->sas_address = BE_64(lun_ext_entry->wwid);
}
new_dev_cnt++;
- break;
+ break;
case TAPE_DEVICE:
case MEDIUM_CHANGER_DEVICE:
new_dev_cnt++;
@@ -1786,6 +1983,7 @@ int pqisrc_scan_devices(pqisrc_softstate_t *softs)
break;
case SES_DEVICE:
case CONTROLLER_DEVICE:
+ default:
break;
}
}
@@ -1805,12 +2003,15 @@ err_out:
}
}
os_mem_free(softs, (char *)new_device_list,
- sizeof(*new_device_list) * ndev_allocated);
+ sizeof(*new_device_list) * ndev_allocated);
}
if(physical_dev_list)
os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
if(logical_dev_list)
os_mem_free(softs, (char *)logical_dev_list, log_data_length);
+ if(logical_queue_dev_list)
+ os_mem_free(softs, (char*)logical_queue_dev_list,
+ queue_log_data_length);
if (bmic_phy_info)
os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
@@ -1822,7 +2023,8 @@ err_out:
/*
* Clean up memory allocated for devices.
*/
-void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
+void
+pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
{
int i = 0,j = 0;
@@ -1831,7 +2033,7 @@ void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
for(i = 0; i < PQI_MAX_DEVICES; i++) {
for(j = 0; j < PQI_MAX_MULTILUN; j++) {
- if (softs->device_list[i][j] == NULL)
+ if (softs->device_list[i][j] == NULL)
continue;
dvp = softs->device_list[i][j];
pqisrc_device_mem_free(softs, dvp);
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
index bc24465b8de4..9cdf388a1260 100644
--- a/sys/dev/smartpqi/smartpqi_event.c
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -49,7 +48,8 @@ pqisrc_rescan_devices(pqisrc_softstate_t *softs)
return ret;
}
-void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
+void
+pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
{
os_sema_lock(&softs->scan_lock);
os_sema_unlock(&softs->scan_lock);
@@ -58,8 +58,8 @@ void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs)
/*
* Subroutine to acknowledge the events processed by the driver to the adapter.
*/
-static void
-pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
+static void
+pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
struct pqi_event *event)
{
@@ -71,7 +71,7 @@ pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
DBG_FUNC("IN\n");
request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
- request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
+ request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) -
PQI_REQUEST_HEADER_LENGTH);
request.event_type = event->event_type;
request.event_id = event->event_id;
@@ -91,8 +91,8 @@ pqisrc_acknowledge_event(pqisrc_softstate_t *softs,
COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo);
if (tmo <= 0) {
DBG_ERR("wait for event acknowledge timed out\n");
- DBG_ERR("tmo : %d\n",tmo);
- }
+ DBG_ERR("tmo : %d\n",tmo);
+ }
DBG_FUNC(" OUT\n");
}
@@ -106,9 +106,10 @@ pqisrc_ack_all_events(void *arg1)
int i;
struct pqi_event *pending_event;
pqisrc_softstate_t *softs = (pqisrc_softstate_t*)arg1;
-
+
DBG_FUNC(" IN\n");
+
pending_event = &softs->pending_events[0];
for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (pending_event->pending == true) {
@@ -129,7 +130,7 @@ pqisrc_ack_all_events(void *arg1)
/*
* Get event index from event type to validate the type of event.
*/
-static int
+static int
pqisrc_event_type_to_event_index(unsigned event_type)
{
int index;
@@ -164,7 +165,7 @@ pqisrc_event_type_to_event_index(unsigned event_type)
/*
* Function used to process the events supported by the adapter.
*/
-int
+int
pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
{
uint32_t obq_pi,obq_ci;
@@ -175,8 +176,6 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
DBG_FUNC(" IN\n");
- OS_ATOMIC64_INC(softs, num_intrs);
-
event_q = &softs->event_q;
obq_ci = event_q->ci_local;
obq_pi = *(event_q->pi_virt_addr);
@@ -207,10 +206,10 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
pending_event->additional_event_id = response.additional_event_id;
}
}
-
+
obq_ci = (obq_ci + 1) % event_q->num_elem;
}
- /* Update CI */
+ /* Update CI */
event_q->ci_local = obq_ci;
PCI_MEM_PUT32(softs, event_q->ci_register_abs,
event_q->ci_register_offset, event_q->ci_local);
@@ -223,14 +222,16 @@ pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id)
DBG_FUNC("OUT");
return PQI_STATUS_SUCCESS;
+
}
/*
* Function used to send a general management request to adapter.
*/
-int pqisrc_submit_management_req(pqisrc_softstate_t *softs,
+int
+pqisrc_submit_management_req(pqisrc_softstate_t *softs,
pqi_event_config_request_t *request)
-{
+{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0];
rcb_t *rcb = NULL;
@@ -242,7 +243,7 @@ int pqisrc_submit_management_req(pqisrc_softstate_t *softs,
if (INVALID_ELEM == request->request_id) {
DBG_ERR("Tag not available\n");
ret = PQI_STATUS_FAILURE;
- goto err_out;
+ goto err_out;
}
rcb = &softs->rcb[request->request_id];
@@ -255,19 +256,19 @@ int pqisrc_submit_management_req(pqisrc_softstate_t *softs,
goto err_cmd;
}
- ret = pqisrc_wait_on_condition(softs, rcb);
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Management request timed out !!\n");
goto err_cmd;
}
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist,request->request_id);
DBG_FUNC("OUT\n");
return ret;
err_cmd:
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist,request->request_id);
err_out:
DBG_FUNC(" failed OUT : %d\n", ret);
@@ -277,9 +278,9 @@ err_out:
/*
* Build and send the general management request.
*/
-static int
-pqi_event_configure(pqisrc_softstate_t *softs ,
- pqi_event_config_request_t *request,
+static int
+pqi_event_configure(pqisrc_softstate_t *softs ,
+ pqi_event_config_request_t *request,
dma_mem_t *buff)
{
int ret = PQI_STATUS_SUCCESS;
@@ -287,9 +288,9 @@ pqi_event_configure(pqisrc_softstate_t *softs ,
DBG_FUNC(" IN\n");
request->header.comp_feature = 0x00;
- request->header.iu_length = sizeof(pqi_event_config_request_t) -
+ request->header.iu_length = sizeof(pqi_event_config_request_t) -
PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */
-
+
	/* Op OQ id where the response is to be delivered */
request->response_queue_id = softs->op_ob_q[0].q_id;
request->buffer_length = buff->size;
@@ -297,15 +298,16 @@ pqi_event_configure(pqisrc_softstate_t *softs ,
request->sg_desc.length = buff->size;
request->sg_desc.zero = 0;
request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT;
-
+
/* submit management req IU*/
ret = pqisrc_submit_management_req(softs,request);
if(ret)
goto err_out;
+
DBG_FUNC(" OUT\n");
return ret;
-
+
err_out:
DBG_FUNC("Failed OUT\n");
return ret;
@@ -315,11 +317,12 @@ err_out:
* Prepare REPORT EVENT CONFIGURATION IU to request that
* event configuration information be reported.
*/
-int pqisrc_report_event_config(pqisrc_softstate_t *softs)
+int
+pqisrc_report_event_config(pqisrc_softstate_t *softs)
{
int ret,i ;
- pqi_event_config_request_t request;
+ pqi_event_config_request_t request;
pqi_event_config_t *event_config_p ;
dma_mem_t buf_report_event ;
	/* bytes to be allocated for the report event config data-in buffer */
@@ -328,7 +331,7 @@ int pqisrc_report_event_config(pqisrc_softstate_t *softs)
DBG_FUNC(" IN\n");
- memset(&buf_report_event, 0, sizeof(struct dma_mem));
+ memset(&buf_report_event, 0, sizeof(struct dma_mem));
buf_report_event.tag = "pqi_report_event_buf" ;
buf_report_event.size = alloc_size;
buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN;
@@ -341,26 +344,26 @@ int pqisrc_report_event_config(pqisrc_softstate_t *softs)
}
DBG_INFO("buf_report_event.dma_addr = %p \n",(void*)buf_report_event.dma_addr);
DBG_INFO("buf_report_event.virt_addr = %p \n",(void*)buf_report_event.virt_addr);
-
+
request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
/* Event configuration */
ret=pqi_event_configure(softs,&request,&buf_report_event);
if(ret)
goto free_mem;
-
+
event_config_p = (pqi_event_config_t*)buf_report_event.virt_addr;
softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
PQI_MAX_EVENT_DESCRIPTORS) ;
-
+
for (i=0; i < softs->event_config.num_event_descriptors ;i++){
- softs->event_config.descriptors[i].event_type =
+ softs->event_config.descriptors[i].event_type =
event_config_p->descriptors[i].event_type;
}
/* free the allocated memory*/
os_dma_mem_free(softs, &buf_report_event);
-
+
DBG_FUNC(" OUT\n");
return ret;
@@ -375,7 +378,8 @@ err_out:
* Prepare SET EVENT CONFIGURATION IU to request that
* event configuration parameters be set.
*/
-int pqisrc_set_event_config(pqisrc_softstate_t *softs)
+int
+pqisrc_set_event_config(pqisrc_softstate_t *softs)
{
int ret,i;
@@ -392,19 +396,19 @@ int pqisrc_set_event_config(pqisrc_softstate_t *softs)
buf_set_event.tag = "pqi_set_event_buf";
buf_set_event.size = alloc_size;
buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN;
-
+
/* allocate memory */
ret = os_dma_mem_alloc(softs, &buf_set_event);
if (ret) {
DBG_ERR("Failed to Allocate set event config buffer : %d\n", ret);
goto err_out;
}
-
+
DBG_INFO("buf_set_event.dma_addr = %p\n",(void*)buf_set_event.dma_addr);
DBG_INFO("buf_set_event.virt_addr = %p\n",(void*)buf_set_event.virt_addr);
request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG;
- request.iu_specific.global_event_oq_id = softs->event_q.q_id;
+ request.iu_specific.global_event_oq_id = softs->event_q.q_id;
/*pointer to data-out buffer*/
@@ -412,22 +416,24 @@ int pqisrc_set_event_config(pqisrc_softstate_t *softs)
event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;
+
for (i=0; i < softs->event_config.num_event_descriptors ; i++){
- event_config_p->descriptors[i].event_type =
+ event_config_p->descriptors[i].event_type =
softs->event_config.descriptors[i].event_type;
if( pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
event_config_p->descriptors[i].oq_id = softs->event_q.q_id;
else
			event_config_p->descriptors[i].oq_id = 0; /* This event is not supported. */
-
+
+
}
/* Event configuration */
ret = pqi_event_configure(softs,&request,&buf_set_event);
if(ret)
goto free_mem;
-
+
os_dma_mem_free(softs, &buf_set_event);
-
+
DBG_FUNC(" OUT\n");
return ret;
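Both the report and set paths reduce to the routing step visible in this hunk: walk the descriptor table and point every supported event type at the event OQ, leaving unsupported types on queue 0. A hedged, standalone sketch of that step (the descriptor layout, event-type values, and queue id are simplified stand-ins for the driver's structures):

#include <stdint.h>
#include <stdio.h>

#define NUM_DESCRIPTORS	4
#define EVENT_OQ_ID	1	/* stand-in for softs->event_q.q_id */

struct event_descriptor {
	uint8_t		event_type;
	uint16_t	oq_id;
};

/* Plays the role of pqisrc_event_type_to_event_index(): non-negative
 * for supported event types, -1 otherwise. Values here are made up. */
static int
event_type_to_index(uint8_t event_type)
{
	return (event_type == 0x10 || event_type == 0x20) ? 0 : -1;
}

int
main(void)
{
	struct event_descriptor desc[NUM_DESCRIPTORS] = {
		{ 0x10, 0 }, { 0x20, 0 }, { 0x30, 0 }, { 0x40, 0 }
	};
	int i;

	for (i = 0; i < NUM_DESCRIPTORS; i++) {
		/* Supported events go to the event queue; oq_id 0 means
		 * the event is not delivered. */
		desc[i].oq_id = (event_type_to_index(desc[i].event_type) != -1) ?
		    EVENT_OQ_ID : 0;
		printf("type %#x -> oq %u\n", (unsigned)desc[i].event_type,
		    (unsigned)desc[i].oq_id);
	}
	return 0;
}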
diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c
index 527756602929..809677c7cc1e 100644
--- a/sys/dev/smartpqi/smartpqi_helper.c
+++ b/sys/dev/smartpqi/smartpqi_helper.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,62 @@
#include "smartpqi_includes.h"
+/* read and modify controller diagnostic option - PQI_PTRAID_UPDATE_ON_RESCAN_LUNS */
+void
+pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ uint32_t diags_options = 0;
+ pqisrc_raid_req_t request;
+
+ DBG_NOTE("IN\n");
+
+ memset(&request, 0, sizeof(request));
+ /* read diags options of controller */
+ ret = pqisrc_build_send_raid_request(softs, &request,
+ (void*)&diags_options,
+ sizeof(diags_options),
+ BMIC_SENSE_DIAGS_OPTIONS,
+ 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_WARN("Request failed for BMIC Sense Diags Option command."
+ "ret:%d\n",ret);
+ return;
+ }
+ DBG_NOTE("diags options data after read: %#x\n",diags_options);
+ diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS;
+ DBG_NOTE("diags options data to write: %#x\n",diags_options);
+ memset(&request, 0, sizeof(request));
+ /* write specified diags options to controller */
+ ret = pqisrc_build_send_raid_request(softs, &request,
+ (void*)&diags_options,
+ sizeof(diags_options),
+ BMIC_SET_DIAGS_OPTIONS,
+ 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (ret != PQI_STATUS_SUCCESS)
+ DBG_WARN("Request failed for BMIC Set Diags Option command."
+ "ret:%d\n",ret);
+#if 0
+ diags_options = 0;
+ memset(&request, 0, sizeof(request));
+ ret = pqisrc_build_send_raid_request(softs, &request,
+ (void*)&diags_options,
+ sizeof(diags_options),
+ BMIC_SENSE_DIAGS_OPTIONS,
+ 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
+ if (ret != PQI_STATUS_SUCCESS)
+ DBG_WARN("Request failed for BMIC Sense Diags Option command."
+ "ret:%d\n",ret);
+ DBG_NOTE("diags options after re-read: %#x\n",diags_options);
+#endif
+ DBG_NOTE("OUT\n");
+}
+
/*
* Function used to validate the adapter health.
*/
-boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
+boolean_t
+pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
@@ -43,7 +94,8 @@ boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs)
/* Function used to set/clear the legacy INTx bit in the Legacy Interrupt INTx
 * mask clear PQI register
*/
-void pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx)
+void
+pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx)
{
uint32_t intx_mask;
uint32_t *reg_addr = NULL;
@@ -65,7 +117,8 @@ void pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_in
/*
* Function used to take exposed devices to OS as offline.
*/
-void pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
+void
+pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
{
pqi_scsi_dev_t *device = NULL;
int i,j;
@@ -86,14 +139,25 @@ void pqisrc_take_devices_offline(pqisrc_softstate_t *softs)
/*
* Function used to take adapter offline.
*/
-void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
+void
+pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
{
-
DBG_FUNC("IN\n");
softs->ctrl_online = false;
- pqisrc_trigger_nmi_sis(softs);
+
+ int lockupcode = 0;
+
+ if (SIS_IS_KERNEL_PANIC(softs)) {
+ lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7);
+ DBG_ERR("Controller FW is not runnning, Lockup code = %x\n", lockupcode);
+ }
+ else {
+ pqisrc_trigger_nmi_sis(softs);
+ }
+
os_complete_outstanding_cmds_nodevice(softs);
+ pqisrc_wait_for_rescan_complete(softs);
pqisrc_take_devices_offline(softs);
DBG_FUNC("OUT\n");
@@ -102,40 +166,21 @@ void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs)
/*
* Timer handler for the adapter heart-beat.
*/
-void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
+void
+pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs)
{
- uint64_t num_intrs;
uint8_t take_offline = false;
DBG_FUNC("IN\n");
- num_intrs = OS_ATOMIC64_READ(softs, num_intrs);
-
- if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
- if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
- take_offline = true;
- goto take_ctrl_offline;
- }
- softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
- DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
+ if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) {
+ take_offline = true;
+ goto take_ctrl_offline;
+ }
+ softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs);
+ DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \
softs->prev_heartbeat_count = %lx\n",
CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count);
- } else {
- if (num_intrs == softs->prev_num_intrs) {
- softs->num_heartbeats_requested++;
- if (softs->num_heartbeats_requested > PQI_MAX_HEARTBEAT_REQUESTS) {
- take_offline = true;
- goto take_ctrl_offline;
- }
- softs->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
-
- pqisrc_ack_all_events((void*)softs);
-
- } else {
- softs->num_heartbeats_requested = 0;
- }
- softs->prev_num_intrs = num_intrs;
- }
take_ctrl_offline:
if (take_offline){
@@ -149,24 +194,30 @@ take_ctrl_offline:
/*
* Conditional variable management routine for internal commands.
*/
-int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb){
+int
+pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
+ uint32_t timeout_in_msec)
+{
DBG_FUNC("IN\n");
int ret = PQI_STATUS_SUCCESS;
- uint32_t loop_cnt = 0;
+
+ /* 1 msec = 500 usec * 2 */
+ uint32_t loop_cnt = timeout_in_msec * 2;
+ uint32_t i = 0;
while (rcb->req_pending == true) {
OS_SLEEP(500); /* Micro sec */
-
- /*Polling needed for FreeBSD : since ithread routine is not scheduled
- during bootup, we could use polling until interrupts are
- enabled (using 'if (cold)'to check for the boot time before
- interrupts are enabled). */
+	/* Polling needed for FreeBSD: since the ithread routine is not scheduled
+	 * during bootup, we could use polling until interrupts are
+	 * enabled (using 'if (cold)' to check for the boot time before
+ * interrupts are enabled). */
IS_POLLING_REQUIRED(softs);
- if (loop_cnt++ == PQISRC_CMD_TIMEOUT_CNT) {
+ if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt)) {
DBG_ERR("ERR: Requested cmd timed out !!!\n");
ret = PQI_STATUS_TIMEOUT;
+ rcb->timedout = true;
break;
}
@@ -175,6 +226,7 @@ int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb){
ret = PQI_STATUS_FAILURE;
break;
}
+
}
rcb->req_pending = true;
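The reworked pqisrc_wait_on_condition() turns the old fixed PQISRC_CMD_TIMEOUT_CNT into a caller-supplied budget: each pass sleeps 500 us, so one millisecond costs two iterations (hence the "* 2"), and TIMEOUT_INFINITE skips the bound entirely. A compilable sketch of just that arithmetic, with the RCB and boot-time polling hooks elided (the sentinel value and userspace sleep are assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_INFINITE 0xffffffffu	/* illustrative sentinel */

/* Wait for *pending to clear. Each poll sleeps 500 us, so the budget
 * in iterations is timeout_in_msec * 2, exactly as in the hunk above. */
static int
wait_on_condition(volatile bool *pending, uint32_t timeout_in_msec)
{
	uint32_t loop_cnt = timeout_in_msec * 2;	/* 1 msec = 500 usec * 2 */
	uint32_t i = 0;

	while (*pending) {
		usleep(500);
		if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt)) {
			fprintf(stderr, "requested cmd timed out\n");
			return (-1);
		}
	}
	return (0);
}

int
main(void)
{
	bool pending = false;	/* command already completed */

	return (wait_on_condition(&pending, 1000) ? 1 : 0);
}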
@@ -184,32 +236,37 @@ int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb){
}
/* Function used to validate the device wwid. */
-boolean_t pqisrc_device_equal(pqi_scsi_dev_t *dev1,
+boolean_t
+pqisrc_device_equal(pqi_scsi_dev_t *dev1,
pqi_scsi_dev_t *dev2)
{
return dev1->wwid == dev2->wwid;
}
/* Function used to validate the device scsi3addr. */
-boolean_t pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
+boolean_t
+pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2)
{
return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
/* Function used to validate hba_lunid */
-boolean_t pqisrc_is_hba_lunid(uint8_t *scsi3addr)
+boolean_t
+pqisrc_is_hba_lunid(uint8_t *scsi3addr)
{
return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID);
}
/* Function used to validate type of device */
-boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *device)
+boolean_t
+pqisrc_is_logical_device(pqi_scsi_dev_t *device)
{
return !device->is_physical_device;
}
/* Function used to sanitize inquiry string */
-void pqisrc_sanitize_inquiry_string(unsigned char *s, int len)
+void
+pqisrc_sanitize_inquiry_string(unsigned char *s, int len)
{
boolean_t terminated = false;
@@ -236,7 +293,8 @@ static char *raid_levels[] = {
};
/* Get the RAID level from the index */
-char *pqisrc_raidlevel_to_string(uint8_t raid_level)
+char *
+pqisrc_raidlevel_to_string(uint8_t raid_level)
{
DBG_FUNC("IN\n");
if (raid_level < ARRAY_SIZE(raid_levels))
@@ -247,7 +305,8 @@ char *pqisrc_raidlevel_to_string(uint8_t raid_level)
}
/* Debug routine for displaying device info */
-void pqisrc_display_device_info(pqisrc_softstate_t *softs,
+void
+pqisrc_display_device_info(pqisrc_softstate_t *softs,
char *action, pqi_scsi_dev_t *device)
{
DBG_INFO( "%s scsi BTL %d:%d:%d: %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
@@ -266,9 +325,10 @@ void pqisrc_display_device_info(pqisrc_softstate_t *softs,
}
/* validate the structure sizes */
-void check_struct_sizes()
-{
-
+void
+check_struct_sizes()
+{
+
ASSERT(sizeof(SCSI3Addr_struct)== 2);
ASSERT(sizeof(PhysDevAddr_struct) == 8);
ASSERT(sizeof(LogDevAddr_struct)== 8);
@@ -276,7 +336,10 @@ void check_struct_sizes()
ASSERT(sizeof(RequestBlock_struct) == 20);
ASSERT(sizeof(MoreErrInfo_struct)== 8);
ASSERT(sizeof(ErrorInfo_struct)== 48);
- ASSERT(sizeof(IOCTL_Command_struct)== 86);
+	/* Check the size of IOCTL_Command_struct on both
+	   64-bit and 32-bit systems */
+ ASSERT(sizeof(IOCTL_Command_struct)== 86 ||
+ sizeof(IOCTL_Command_struct)== 82);
ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42);
ASSERT(sizeof(struct bmic_host_wellness_time)== 20);
ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8);
@@ -296,7 +359,8 @@ void check_struct_sizes()
ASSERT(sizeof(pqi_dev_cap_t)== 576);
ASSERT(sizeof(pqi_aio_req_t)== 128);
ASSERT(sizeof(pqisrc_raid_req_t)== 128);
- ASSERT(sizeof(pqi_tmf_req_t)== 32);
+ ASSERT(sizeof(pqi_raid_tmf_req_t)== 32);
+ ASSERT(sizeof(pqi_aio_tmf_req_t)== 32);
ASSERT(sizeof(struct pqi_io_response)== 16);
ASSERT(sizeof(struct sense_header_scsi)== 8);
ASSERT(sizeof(reportlun_header_t)==8);
@@ -306,5 +370,118 @@ void check_struct_sizes()
ASSERT(sizeof(pqisrc_raid_map_t)== 8256);
ASSERT(sizeof(bmic_ident_ctrl_t)== 325);
ASSERT(sizeof(bmic_ident_physdev_t)==2048);
-
+
+}
+
+uint32_t
+pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ uint32_t i, active_io = 0;
+ rcb_t* rcb;
+
+ for(i = 1; i <= softs->max_outstanding_io; i++) {
+ rcb = &softs->rcb[i];
+ if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
+ active_io++;
+ }
+ }
+ return active_io;
+}
+
+void
+check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ uint32_t tag = softs->max_outstanding_io, active_requests;
+	uint64_t timeout = 0, delay_in_usec = 1000; // in microseconds
+ rcb_t* rcb;
+
+ DBG_FUNC("IN\n");
+
+ active_requests = pqisrc_count_num_scsi_active_requests_on_dev(softs, device);
+
+ DBG_WARN("Device Outstanding IO count = %u\n", active_requests);
+
+ if(!active_requests)
+ return;
+
+ do {
+ rcb = &softs->rcb[tag];
+ if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) {
+ OS_BUSYWAIT(delay_in_usec);
+ timeout += delay_in_usec;
+ }
+ else
+ tag--;
+ if(timeout >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
+ DBG_WARN("timed out waiting for pending IO\n");
+ return;
+ }
+ } while(tag);
+
+}
+
+inline uint64_t
+pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /*Increment device active io count by one*/
+ return OS_ATOMIC64_INC(&device->active_requests);
+#endif
+}
+
+inline uint64_t
+pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /*Decrement device active io count by one*/
+ return OS_ATOMIC64_DEC(&device->active_requests);
+#endif
+}
+
+inline void
+pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /* Reset device count to Zero */
+ OS_ATOMIC64_INIT(&device->active_requests, 0);
+#endif
+}
+
+inline uint64_t
+pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+#if PQISRC_DEVICE_IO_COUNTER
+ /* read device active count*/
+ return OS_ATOMIC64_READ(&device->active_requests);
+#endif
+}
+
+void
+pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
+{
+ uint64_t timeout_in_usec = 0, delay_in_usec = 1000; //In microseconds
+
+ DBG_FUNC("IN\n");
+
+ if(!softs->ctrl_online)
+ return;
+
+#if PQISRC_DEVICE_IO_COUNTER
+ DBG_NOTE("Device Outstanding IO count = %ld\n", pqisrc_read_device_active_io(softs, device));
+
+ while(pqisrc_read_device_active_io(softs, device)) {
+ OS_BUSYWAIT(delay_in_usec); // In microseconds
+ if(!softs->ctrl_online) {
+ DBG_WARN("Controller Offline was detected.\n");
+ }
+ timeout_in_usec += delay_in_usec;
+ if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) {
+ DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%ld\n",
+ pqisrc_read_device_active_io(softs, device));
+ return;
+ }
+ }
+#else
+ check_device_pending_commands_to_complete(softs, device);
+#endif
}
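The helper additions above give each device an outstanding-IO count in two flavors: an atomic counter behind PQISRC_DEVICE_IO_COUNTER that the wait loop polls once per millisecond, and a fallback that rescans the RCB array. A minimal sketch of the counter flavor in C11 atomics (the OS_ATOMIC64_* macros wrap platform primitives; the names and the one-second budget below are assumptions):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PENDING_IO_TIMEOUT_USEC	1000000ULL	/* illustrative: 1 second */

struct scsi_dev {
	atomic_uint_fast64_t	active_requests;
};

/* Counterparts of pqisrc_increment/decrement_device_active_io(). */
static void
dev_io_inc(struct scsi_dev *d)
{
	atomic_fetch_add(&d->active_requests, 1);
}

static void
dev_io_dec(struct scsi_dev *d)
{
	atomic_fetch_sub(&d->active_requests, 1);
}

/* Busy-wait until the device has no in-flight requests, or give up. */
static void
wait_for_device_idle(struct scsi_dev *d)
{
	uint64_t waited = 0, delay = 1000;	/* microseconds */

	while (atomic_load(&d->active_requests) != 0) {
		usleep(delay);
		waited += delay;
		if (waited >= PENDING_IO_TIMEOUT_USEC) {
			fprintf(stderr, "timed out waiting for pending IO\n");
			return;
		}
	}
}

int
main(void)
{
	struct scsi_dev dev = { .active_requests = 0 };

	dev_io_inc(&dev);
	dev_io_dec(&dev);
	wait_for_device_idle(&dev);	/* returns immediately */
	return 0;
}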
diff --git a/sys/dev/smartpqi/smartpqi_includes.h b/sys/dev/smartpqi/smartpqi_includes.h
index 823424e9c30e..6c91133cb15d 100644
--- a/sys/dev/smartpqi/smartpqi_includes.h
+++ b/sys/dev/smartpqi/smartpqi_includes.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -78,9 +77,12 @@
#include <vm/vm.h>
#include <vm/pmap.h>
+
+
#include "smartpqi_defines.h"
#include "smartpqi_structures.h"
#include "smartpqi_prototypes.h"
#include "smartpqi_ioctl.h"
+
#endif // _PQI_INCLUDES_H
diff --git a/sys/dev/smartpqi/smartpqi_init.c b/sys/dev/smartpqi/smartpqi_init.c
index 398a3bc207d1..1f127cff21ec 100644
--- a/sys/dev/smartpqi/smartpqi_init.c
+++ b/sys/dev/smartpqi/smartpqi_init.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,13 +27,17 @@
#include "smartpqi_includes.h"
+/* 5 mins timeout for quiesce */
+#define PQI_QUIESCE_TIMEOUT 300000
+
/*
* Request the adapter to get PQI capabilities supported.
*/
-static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
+static int
+pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
-
+
DBG_FUNC("IN\n");
gen_adm_req_iu_t admin_req;
@@ -79,7 +82,7 @@ static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
} else {
DBG_ERR("Failed to send admin req report pqi device capability\n");
goto err_admin_req;
-
+
}
softs->pqi_dev_cap.max_iqs = capability->max_iqs;
@@ -108,6 +111,7 @@ static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
+
os_mem_free(softs, (void *)capability,
REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
os_dma_mem_free(softs, &pqi_cap_dma_buf);
@@ -129,9 +133,9 @@ err_out:
/*
* Function used to deallocate the used rcb.
*/
-void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
+void
+pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
{
-
uint32_t num_req;
size_t size;
int i;
@@ -146,10 +150,12 @@ void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
DBG_FUNC("OUT\n");
}
+
/*
* Allocate memory for rcb and SG descriptors.
*/
-static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
+static int
+pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
@@ -163,7 +169,7 @@ static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
/* Set maximum outstanding requests */
/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
* The rcb will be accessed by using the tag as index
- * * As 0 tag index is not used, we need to allocate one extra.
+ * As 0 tag index is not used, we need to allocate one extra.
*/
softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
num_req = softs->max_outstanding_io + 1;
@@ -217,15 +223,16 @@ err_out:
* Function used to decide the operational queue configuration params
* - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
*/
-void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
+void
+pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
{
uint16_t total_iq_elements;
DBG_FUNC("IN\n");
- DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
+ DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
softs->intr_count, softs->num_cpus_online);
-
+
if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
/* Share the event and Operational queue. */
softs->num_op_obq = 1;
@@ -233,17 +240,13 @@ void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
}
else {
/* Note : One OBQ (OBQ0) reserved for event queue */
- softs->num_op_obq = MIN(softs->num_cpus_online,
+ softs->num_op_obq = MIN(softs->num_cpus_online,
softs->intr_count) - 1;
- softs->num_op_obq = softs->intr_count - 1;
softs->share_opq_and_eventq = false;
}
-
- /*
- * softs->num_cpus_online is set as number of physical CPUs,
- * So we can have more queues/interrupts .
- */
- if (softs->intr_count > 1)
+ /* If the available interrupt count is more than one,
+	   we don't need to share the interrupt between the IO and event queues */
+ if (softs->intr_count > 1)
softs->share_opq_and_eventq = false;
DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
@@ -261,23 +264,23 @@ void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
softs->max_ib_iu_length =
(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
softs->ibq_elem_size;
-
+
}
- /* If Max. Outstanding IO came with Max. Spanning element count then,
+ /* If Max. Outstanding IO came with Max. Spanning element count then,
needed elements per IO are multiplication of
Max.Outstanding IO and Max.Spanning element */
- total_iq_elements = (softs->max_outstanding_io *
+ total_iq_elements = (softs->max_outstanding_io *
(softs->max_ib_iu_length / softs->ibq_elem_size));
softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
- softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
+ softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
softs->pqi_dev_cap.max_iq_elements);
- softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
+ softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
softs->pqi_dev_cap.max_oq_elements);
- softs->max_sg_per_iu = ((softs->max_ib_iu_length -
+ softs->max_sg_per_iu = ((softs->max_ib_iu_length -
softs->ibq_elem_size) /
sizeof(sgt_t)) +
MAX_EMBEDDED_SG_IN_FIRST_IU;
@@ -293,11 +296,12 @@ void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
/*
* Configure the operational queue parameters.
*/
-int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
+int
+pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
- /* Get the PQI capability,
+ /* Get the PQI capability,
REPORT PQI DEVICE CAPABILITY request */
ret = pqisrc_report_pqi_capability(softs);
if (ret) {
@@ -310,11 +314,11 @@ int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
/* Decide the Op queue configuration */
- pqisrc_decide_opq_config(softs);
+ pqisrc_decide_opq_config(softs);
DBG_FUNC("OUT\n");
return ret;
-
+
err_out:
DBG_FUNC("OUT failed\n");
return ret;
@@ -323,7 +327,8 @@ err_out:
/*
* Validate the PQI mode of adapter.
*/
-int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
+int
+pqisrc_check_pqimode(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
int tmo = 0;
@@ -335,7 +340,7 @@ int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
tmo = PQISRC_PQIMODE_READY_TIMEOUT;
do {
signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
-
+
if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
sizeof(uint64_t)) == 0) {
ret = PQI_STATUS_SUCCESS;
@@ -362,6 +367,7 @@ int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
goto err_out;
}
+
tmo = PQISRC_PQIMODE_READY_TIMEOUT;
/* Check the PQI device status register */
COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
@@ -379,11 +385,207 @@ err_out:
return ret;
}
+/* PQI Feature processing */
+static int
+pqisrc_config_table_update(struct pqisrc_softstate *softs,
+ uint16_t first_section, uint16_t last_section)
+{
+ pqi_vendor_general_request_t request;
+ int ret = PQI_STATUS_FAILURE;
+
+ memset(&request, 0, sizeof(request));
+
+ request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+ request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
+ request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
+ request.data.config_table_update.first_section = first_section;
+ request.data.config_table_update.last_section = last_section;
+
+ ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
+ return PQI_STATUS_FAILURE;
+ }
+
+ return PQI_STATUS_SUCCESS;
+}
+
+static inline
+boolean_t pqi_is_firmware_feature_supported(
+ struct pqi_conf_table_firmware_features *firmware_feature_list,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = bit_position / BITS_PER_BYTE;
+
+ if (byte_index >= firmware_feature_list->num_elements)
+ return false;
+
+ return firmware_feature_list->features_supported[byte_index] &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline
+boolean_t pqi_is_firmware_feature_enabled(
+ struct pqi_conf_table_firmware_features *firmware_feature_list,
+ uint8_t *firmware_features_addr, unsigned int bit_position)
+{
+ unsigned int byte_index;
+ uint8_t *feature_enabled_addr;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ (firmware_feature_list->num_elements * 2);
+
+ feature_enabled_addr = firmware_features_addr +
+ offsetof(struct pqi_conf_table_firmware_features,
+ features_supported) + byte_index;
+
+ return *feature_enabled_addr &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline void
+pqi_request_firmware_feature(
+ struct pqi_conf_table_firmware_features *firmware_feature_list,
+ unsigned int bit_position)
+{
+ unsigned int byte_index;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ firmware_feature_list->num_elements;
+
+ firmware_feature_list->features_supported[byte_index] |=
+ (1 << (bit_position % BITS_PER_BYTE));
+}
+
+/* Update PQI config table firmware features section and inform the firmware */
+static int
+pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
+ struct pqi_conf_table_firmware_features *firmware_feature_list)
+{
+ uint8_t *request_feature_addr;
+ void *request_feature_abs_addr;
+
+ request_feature_addr = firmware_feature_list->features_supported +
+ firmware_feature_list->num_elements;
+ request_feature_abs_addr = softs->fw_features_section_abs_addr +
+ (request_feature_addr - (uint8_t*)firmware_feature_list);
+
+ os_io_memcpy(request_feature_abs_addr, request_feature_addr,
+ firmware_feature_list->num_elements);
+
+ return pqisrc_config_table_update(softs,
+ PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
+ PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+/* Check firmware has enabled the feature specified in the respective bit position. */
+inline boolean_t
+pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
+ struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
+{
+ uint16_t byte_index;
+ uint8_t *features_enabled_abs_addr;
+
+ byte_index = (bit_position / BITS_PER_BYTE) +
+ (firmware_feature_list->num_elements * 2);
+
+ features_enabled_abs_addr = softs->fw_features_section_abs_addr +
+ offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
+
+ return *features_enabled_abs_addr &
+ (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static void
+pqi_firmware_feature_status(struct pqisrc_softstate *softs,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch(firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_OFA:
+ break;
+ case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
+ softs->timeout_in_passthrough = true;
+ break;
+ case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
+ softs->timeout_in_tmf = true;
+ break;
+ default:
+ DBG_NOTE("Nothing to do \n");
+ }
+}
+
+/* Firmware features supported by the driver */
+static struct
+pqi_firmware_feature pqi_firmware_features[] = {
+ {
+ .feature_name = "Support timeout for pass-through commands",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
+ .feature_status = pqi_firmware_feature_status,
+ },
+ {
+ .feature_name = "Support timeout for LUN Reset TMF",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
+ .feature_status = pqi_firmware_feature_status,
+ }
+};
+
+static void
+pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
+{
+ int rc;
+ struct pqi_conf_table_firmware_features *firmware_feature_list;
+ unsigned int i;
+ unsigned int num_features_requested;
+
+ firmware_feature_list = (struct pqi_conf_table_firmware_features*)
+ softs->fw_features_section_abs_addr;
+
+ /* Check features and request those supported by firmware and driver.*/
+ for (i = 0, num_features_requested = 0;
+ i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ /* Firmware support it ? */
+ if (pqi_is_firmware_feature_supported(firmware_feature_list,
+ pqi_firmware_features[i].feature_bit)) {
+ pqi_request_firmware_feature(firmware_feature_list,
+ pqi_firmware_features[i].feature_bit);
+ pqi_firmware_features[i].supported = true;
+ num_features_requested++;
+ DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
+ pqi_firmware_features[i].feature_name);
+ } else {
+ DBG_NOTE("%s supported by driver, but not by current firmware\n",
+ pqi_firmware_features[i].feature_name);
+ }
+ }
+ if (num_features_requested == 0)
+ return;
+
+ rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
+ if (rc) {
+ DBG_ERR("Failed to update pqi config table\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+ if (pqi_is_firmware_feature_enabled(firmware_feature_list,
+ softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
+ pqi_firmware_features[i].enabled = true;
+ DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
+ if(pqi_firmware_features[i].feature_status)
+ pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
+ }
+ }
+}
+
/*
* Get the PQI configuration table parameters.
* Currently using for heart-beat counter scratch-pad register.
*/
-int pqisrc_process_config_table(pqisrc_softstate_t *softs)
+int
+pqisrc_process_config_table(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_FAILURE;
uint32_t config_table_size;
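The firmware-features section introduced above is three parallel bitmaps packed into one byte array: bytes [0, n) hold the firmware-supported bits, [n, 2n) the host-requested bits, and [2n, 3n) the firmware-enabled bits, which is why the requested index adds num_elements and the enabled index adds num_elements * 2. A standalone illustration of that indexing (buffer contents here are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* features[0..n)   : supported by firmware
 * features[n..2n)  : requested by host
 * features[2n..3n) : enabled by firmware */
struct fw_features {
	unsigned	num_elements;		/* n: bytes per bitmap */
	uint8_t		features[3 * 2];	/* n == 2 in this toy */
};

static bool
feature_supported(const struct fw_features *f, unsigned bit)
{
	unsigned byte = bit / BITS_PER_BYTE;

	if (byte >= f->num_elements)
		return false;
	return (f->features[byte] & (1u << (bit % BITS_PER_BYTE))) != 0;
}

static void
request_feature(struct fw_features *f, unsigned bit)
{
	unsigned byte = bit / BITS_PER_BYTE + f->num_elements;

	f->features[byte] |= 1u << (bit % BITS_PER_BYTE);
}

static bool
feature_enabled(const struct fw_features *f, unsigned bit)
{
	unsigned byte = bit / BITS_PER_BYTE + f->num_elements * 2;

	return (f->features[byte] & (1u << (bit % BITS_PER_BYTE))) != 0;
}

int
main(void)
{
	struct fw_features f = {
		.num_elements = 2,
		.features = { 0x05, 0x00, 0x00, 0x00, 0x01, 0x00 }
	};

	if (feature_supported(&f, 2))	/* bit 2 set in supported map */
		request_feature(&f, 2);	/* sets bit 2 in requested map */
	printf("bit 0 enabled: %d\n", feature_enabled(&f, 0));	/* 1 */
	printf("bit 2 enabled: %d\n", feature_enabled(&f, 2));	/* 0 */
	return 0;
}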
@@ -407,6 +609,13 @@ int pqisrc_process_config_table(pqisrc_softstate_t *softs)
return ret;
}
+ if (config_table_size < sizeof(conf_table) ||
+ config_table_size > PQI_CONF_TABLE_MAX_LEN) {
+ DBG_ERR("Invalid PQI conf table length of %u\n",
+ config_table_size);
+ goto out;
+ }
+
config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
softs->pqi_cap.conf_tab_off);
@@ -414,6 +623,7 @@ int pqisrc_process_config_table(pqisrc_softstate_t *softs)
softs->pqi_cap.conf_tab_off,
(uint8_t*)conf_table, config_table_size);
+
if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
sizeof(conf_table->sign)) != 0) {
DBG_ERR("Invalid PQI config signature\n");
@@ -423,20 +633,23 @@ int pqisrc_process_config_table(pqisrc_softstate_t *softs)
section_off = LE_32(conf_table->first_section_off);
while (section_off) {
+
if (section_off+ sizeof(*section_hdr) >= config_table_size) {
- DBG_ERR("PQI config table section offset (%u) beyond \
- end of config table (config table length: %u)\n",
- section_off, config_table_size);
+ DBG_INFO("Reached end of PQI config table. Breaking off.\n");
break;
}
-
+
section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
-
+
switch (LE_16(section_hdr->section_id)) {
case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
- case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
case PQI_CONF_TABLE_SECTION_DEBUG:
+ break;
+ case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
+ softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
+ softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
+ pqisrc_process_firmware_features(softs);
break;
case PQI_CONF_TABLE_SECTION_HEARTBEAT:
softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
@@ -460,7 +673,8 @@ out:
}
/* Wait for PQI reset completion for the adapter*/
-int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
+int
+pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
pqi_reset_reg_t reset_reg;
@@ -476,7 +690,7 @@ int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
while(1) {
if (pqi_reset_timeout++ == max_timeout) {
- return PQI_STATUS_TIMEOUT;
+ return PQI_STATUS_TIMEOUT;
}
OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
reset_reg.all_bits = PCI_MEM_GET32(softs,
@@ -491,7 +705,8 @@ int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
/*
* Function used to perform PQI hard reset.
*/
-int pqi_reset(pqisrc_softstate_t *softs)
+int
+pqi_reset(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t val = 0;
@@ -499,7 +714,7 @@ int pqi_reset(pqisrc_softstate_t *softs)
DBG_FUNC("IN\n");
- if (true == softs->ctrl_in_pqi_mode) {
+ if (true == softs->ctrl_in_pqi_mode) {
if (softs->pqi_reset_quiesce_allowed) {
val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
@@ -535,7 +750,8 @@ int pqi_reset(pqisrc_softstate_t *softs)
/*
* Initialize the adapter with supported PQI configuration.
*/
-int pqisrc_pqi_init(pqisrc_softstate_t *softs)
+int
+pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -559,7 +775,7 @@ int pqisrc_pqi_init(pqisrc_softstate_t *softs)
goto err_out;
}
- softs->intr_type = INTR_TYPE_NONE;
+ softs->intr_type = INTR_TYPE_NONE;
/* Get the interrupt count, type, priority available from OS */
ret = os_get_intr_config(softs);
@@ -577,16 +793,16 @@ int pqisrc_pqi_init(pqisrc_softstate_t *softs)
sis_enable_intx(softs);
}
- /* Create Admin Queue pair*/
+ /* Create Admin Queue pair*/
ret = pqisrc_create_admin_queue(softs);
if(ret) {
DBG_ERR("Failed to configure admin queue\n");
goto err_admin_queue;
}
- /* For creating event and IO operational queues we have to submit
- admin IU requests.So Allocate resources for submitting IUs */
-
+ /* For creating event and IO operational queues we have to submit
+	admin IU requests. So allocate resources for submitting IUs */
+
/* Allocate the request container block (rcb) */
ret = pqisrc_allocate_rcb(softs);
if (ret == PQI_STATUS_FAILURE) {
@@ -626,7 +842,7 @@ err_create_opq:
err_config_opq:
pqisrc_destroy_taglist(softs,&softs->taglist);
err_taglist:
- pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
+ pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
pqisrc_destroy_admin_queue(softs);
err_admin_queue:
@@ -636,8 +852,8 @@ err_out:
return PQI_STATUS_FAILURE;
}
-/* */
-int pqisrc_force_sis(pqisrc_softstate_t *softs)
+int
+pqisrc_force_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -661,7 +877,7 @@ int pqisrc_force_sis(pqisrc_softstate_t *softs)
ret = pqi_reset(softs);
if (ret) {
return ret;
- }
+ }
/* Re enable SIS */
ret = pqisrc_reenable_sis(softs);
if (ret) {
@@ -670,24 +886,53 @@ int pqisrc_force_sis(pqisrc_softstate_t *softs)
PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
- return ret;
+ return ret;
}
-int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
+static int
+pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
{
+ int count = 0;
int ret = PQI_STATUS_SUCCESS;
- int tmo = PQI_CMND_COMPLETE_TMO;
- COND_WAIT((softs->taglist.num_elem == softs->max_outstanding_io), tmo);
- if (!tmo) {
- DBG_ERR("Pending commands %x!!!",softs->taglist.num_elem);
- ret = PQI_STATUS_TIMEOUT;
+ DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
+
+ if (softs->taglist.num_elem == softs->max_outstanding_io)
+ return ret;
+ else {
+ DBG_WARN("%d commands pending\n",
+ softs->max_outstanding_io - softs->taglist.num_elem);
+
+ while(1) {
+
+		/* Since the heartbeat timer stopped, check the firmware status */
+ if (SIS_IS_KERNEL_PANIC(softs)) {
+ DBG_ERR("Controller FW is not running\n");
+ return PQI_STATUS_FAILURE;
+ }
+
+ if (softs->taglist.num_elem != softs->max_outstanding_io) {
+ /* Sleep for 1 msec */
+ OS_SLEEP(1000);
+ count++;
+ if(count % 1000 == 0) {
+ DBG_WARN("Waited for %d seconds", count/1000);
+ }
+ if (count >= PQI_QUIESCE_TIMEOUT) {
+ return PQI_STATUS_FAILURE;
+ }
+ continue;
+ }
+ break;
+ }
}
return ret;
}
-void pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
+static void
+pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
{
+
int tag = 0;
rcb_t *rcb;
@@ -700,10 +945,12 @@ void pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
}
}
+
/*
* Uninitialize the resources used during PQI initialization.
*/
-void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
+void
+pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
int i, ret;
@@ -715,29 +962,31 @@ void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
/* Wait for commands to complete */
ret = pqisrc_wait_for_cmnd_complete(softs);
+ /* disable and free the interrupt resources */
+ os_destroy_intr(softs);
+
/* Complete all pending commands. */
if(ret != PQI_STATUS_SUCCESS) {
pqisrc_complete_internal_cmds(softs);
os_complete_outstanding_cmds_nodevice(softs);
}
- if(softs->devlist_lockcreated==true){
- os_uninit_spinlock(&softs->devlist_lock);
- softs->devlist_lockcreated = false;
- }
-
+ if(softs->devlist_lockcreated==true){
+ os_uninit_spinlock(&softs->devlist_lock);
+ softs->devlist_lockcreated = false;
+ }
+
for (i = 0; i < softs->num_op_raid_ibq; i++) {
- /* OP RAID IB Q */
- if(softs->op_raid_ib_q[i].lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
- softs->op_raid_ib_q[i].lockcreated = false;
- }
-
- /* OP AIO IB Q */
- if(softs->op_aio_ib_q[i].lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
- softs->op_aio_ib_q[i].lockcreated = false;
- }
+ /* OP RAID IB Q */
+ if(softs->op_raid_ib_q[i].lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
+ softs->op_raid_ib_q[i].lockcreated = false;
+ }
+ /* OP AIO IB Q */
+ if(softs->op_aio_ib_q[i].lockcreated==true){
+ OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
+ softs->op_aio_ib_q[i].lockcreated = false;
+ }
}
/* Free Op queues */
@@ -745,15 +994,17 @@ void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
os_dma_mem_free(softs, &softs->op_obq_dma_mem);
os_dma_mem_free(softs, &softs->event_q_dma_mem);
+
+
/* Free rcb */
pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
/* Free request id lists */
pqisrc_destroy_taglist(softs,&softs->taglist);
- if(softs->admin_ib_queue.lockcreated==true){
- OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
- softs->admin_ib_queue.lockcreated = false;
+ if(softs->admin_ib_queue.lockcreated==true) {
+ OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
+ softs->admin_ib_queue.lockcreated = false;
}
/* Free Admin Queue */
@@ -770,15 +1021,16 @@ void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
/*
* Function to initialize the adapter settings.
*/
-int pqisrc_init(pqisrc_softstate_t *softs)
+int
+pqisrc_init(pqisrc_softstate_t *softs)
{
int ret = 0;
int i = 0, j = 0;
DBG_FUNC("IN\n");
-
+
check_struct_sizes();
-
+
/* Init the Sync interface */
ret = pqisrc_sis_init(softs);
if (ret) {
@@ -812,7 +1064,7 @@ int pqisrc_init(pqisrc_softstate_t *softs)
DBG_ERR(" Failed to configure Report events\n");
goto err_event;
}
-
+
/* Set event configuration*/
ret = pqisrc_set_event_config(softs);
if(ret){
@@ -834,7 +1086,7 @@ int pqisrc_init(pqisrc_softstate_t *softs)
goto err_host_wellness;
}
-
+
os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
if(ret){
@@ -844,20 +1096,14 @@ int pqisrc_init(pqisrc_softstate_t *softs)
}
softs->devlist_lockcreated = true;
- OS_ATOMIC64_SET(softs, num_intrs, 0);
- softs->prev_num_intrs = softs->num_intrs;
-
/* Get the PQI configuration table to read heart-beat counter*/
- if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
- ret = pqisrc_process_config_table(softs);
- if (ret) {
- DBG_ERR("Failed to process PQI configuration table %d\n", ret);
- goto err_config_tab;
- }
+ ret = pqisrc_process_config_table(softs);
+ if (ret) {
+ DBG_ERR("Failed to process PQI configuration table %d\n", ret);
+ goto err_config_tab;
}
- if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
- softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
+ softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
/* Init device list */
for(i = 0; i < PQI_MAX_DEVICES; i++)
@@ -870,15 +1116,14 @@ int pqisrc_init(pqisrc_softstate_t *softs)
return ret;
err_config_tab:
- if(softs->devlist_lockcreated==true){
+ if(softs->devlist_lockcreated==true){
os_uninit_spinlock(&softs->devlist_lock);
softs->devlist_lockcreated = false;
- }
+ }
err_lock:
err_fw_version:
err_event:
err_host_wellness:
- os_destroy_intr(softs);
err_intr:
pqisrc_pqi_uninit(softs);
err_pqi:
@@ -894,7 +1139,8 @@ err_out:
* Write all data in the adapter's battery-backed cache to
* storage.
*/
-int pqisrc_flush_cache( pqisrc_softstate_t *softs,
+int
+pqisrc_flush_cache( pqisrc_softstate_t *softs,
enum pqisrc_flush_cache_event_type event_type)
{
int rval = PQI_STATUS_SUCCESS;
@@ -906,7 +1152,7 @@ int pqisrc_flush_cache( pqisrc_softstate_t *softs,
if (pqisrc_ctrl_offline(softs))
return PQI_STATUS_FAILURE;
- flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
+ flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
if (!flush_buff) {
DBG_ERR("Failed to allocate memory for flush cache params\n");
rval = PQI_STATUS_FAILURE;
@@ -936,7 +1182,8 @@ int pqisrc_flush_cache( pqisrc_softstate_t *softs,
/*
* Uninitialize the adapter.
*/
-void pqisrc_uninit(pqisrc_softstate_t *softs)
+void
+pqisrc_uninit(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
@@ -946,8 +1193,6 @@ void pqisrc_uninit(pqisrc_softstate_t *softs)
os_destroy_semaphore(&softs->scan_lock);
- os_destroy_intr(softs);
-
pqisrc_cleanup_devices(softs);
DBG_FUNC("OUT\n");
diff --git a/sys/dev/smartpqi/smartpqi_intr.c b/sys/dev/smartpqi/smartpqi_intr.c
index d31112a86c69..e22bbbb1f18c 100644
--- a/sys/dev/smartpqi/smartpqi_intr.c
+++ b/sys/dev/smartpqi/smartpqi_intr.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,7 +30,8 @@
/*
* Function to get processor count
*/
-int os_get_processor_config(pqisrc_softstate_t *softs)
+int
+os_get_processor_config(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
softs->num_cpus_online = mp_ncpus;
@@ -43,18 +43,15 @@ int os_get_processor_config(pqisrc_softstate_t *softs)
/*
* Function to get interrupt count and type supported
*/
-int os_get_intr_config(pqisrc_softstate_t *softs)
+int
+os_get_intr_config(pqisrc_softstate_t *softs)
{
- device_t dev;
- int msi_count = 0;
- int error = 0;
- int ret = PQI_STATUS_SUCCESS;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
+ int msi_count = pci_msix_count(dev);
+ int error = BSD_SUCCESS;
DBG_FUNC("IN\n");
- msi_count = pci_msix_count(dev);
-
if (msi_count > softs->num_cpus_online)
msi_count = softs->num_cpus_online;
if (msi_count > PQI_MAX_MSIX)
@@ -90,21 +87,21 @@ int os_get_intr_config(pqisrc_softstate_t *softs)
softs->intr_count = 1;
}
- if(!softs->intr_type) {
- DBG_FUNC("OUT failed\n");
- ret = PQI_STATUS_FAILURE;
- return ret;
- }
DBG_FUNC("OUT\n");
- return ret;
+
+ error = bsd_status_to_pqi_status(BSD_SUCCESS);
+
+ return error;
}
-void os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc)
+void
+os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc)
{
taskqueue_enqueue(taskqueue_swi, &sc->os_specific.event_task);
}
-void pqisrc_event_worker(void *arg1, int arg2)
+void
+pqisrc_event_worker(void *arg1, int arg2)
{
pqisrc_ack_all_events(arg1);
}
@@ -112,7 +109,8 @@ void pqisrc_event_worker(void *arg1, int arg2)
/*
* ithread routine to handle uniprocessor systems
*/
-static void shared_ithread_routine(void *arg)
+static void
+shared_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
@@ -120,6 +118,9 @@ static void shared_ithread_routine(void *arg)
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return;
+
pqisrc_process_response_queue(softs, oq_id);
pqisrc_process_event_intr_src(softs, oq_id - 1);
@@ -129,7 +130,8 @@ static void shared_ithread_routine(void *arg)
/*
* ithread routine to process non event response
*/
-static void common_ithread_routine(void *arg)
+static void
+common_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
@@ -137,12 +139,16 @@ static void common_ithread_routine(void *arg)
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return;
+
pqisrc_process_response_queue(softs, oq_id);
DBG_FUNC("OUT\n");
}
-static void event_ithread_routine(void *arg)
+static void
+event_ithread_routine(void *arg)
{
pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg;
pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev);
@@ -150,6 +156,9 @@ static void event_ithread_routine(void *arg)
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return;
+
pqisrc_process_event_intr_src(softs, oq_id);
DBG_FUNC("OUT\n");
@@ -158,34 +167,34 @@ static void event_ithread_routine(void *arg)
/*
* Registration of legacy interrupt in case MSI is unsupported
*/
-int register_legacy_intr(pqisrc_softstate_t *softs)
+int
+register_legacy_intr(pqisrc_softstate_t *softs)
{
- int error = 0;
- device_t dev;
+ int error = BSD_SUCCESS;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
- dev = softs->os_specific.pqi_dev;
-
softs->os_specific.pqi_irq_rid[0] = 0;
softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \
SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0],
RF_ACTIVE | RF_SHAREABLE);
if (NULL == softs->os_specific.pqi_irq[0]) {
DBG_ERR("Failed to allocate resource for interrupt\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
if ((softs->os_specific.msi_ctx = os_mem_alloc(softs,sizeof(pqi_intr_ctx_t))) == NULL) {
DBG_ERR("Failed to allocate memory for msi_ctx\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
softs->os_specific.msi_ctx[0].pqi_dev = dev;
+ /* For Legacy support oq_id should be one */
softs->os_specific.msi_ctx[0].oq_id = 1;
error = bus_setup_intr(dev, softs->os_specific.pqi_irq[0],
INTR_TYPE_CAM | INTR_MPSAFE, \
NULL, shared_ithread_routine,
- &softs->os_specific.msi_ctx[0],
+ &softs->os_specific.msi_ctx[0],
&softs->os_specific.intrcookie[0]);
if (error) {
DBG_ERR("Failed to setup legacy interrupt err = %d\n", error);
@@ -199,19 +208,24 @@ int register_legacy_intr(pqisrc_softstate_t *softs)
}
/*
- * Registration of MSIx
+ * Registration of MSIx
*/
-int register_msix_intr(pqisrc_softstate_t *softs)
+int
+register_msix_intr(pqisrc_softstate_t *softs)
{
- int error = 0;
+ int error = BSD_SUCCESS;
int i = 0;
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
DBG_FUNC("IN\n");
softs->os_specific.msi_ctx = os_mem_alloc(softs, sizeof(pqi_intr_ctx_t) * msix_count);
+ if (!softs->os_specific.msi_ctx) {
+ DBG_ERR("Memory allocation failed\n");
+ return ENXIO;
+ }
+
/*Add shared handler */
if (softs->share_opq_and_eventq) {
softs->os_specific.pqi_irq_rid[i] = i+1;
@@ -222,12 +236,12 @@ int register_msix_intr(pqisrc_softstate_t *softs)
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("Failed to allocate \
event interrupt resource\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
-
+
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i+1;
-
+
error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
NULL,
@@ -236,7 +250,7 @@ int register_msix_intr(pqisrc_softstate_t *softs)
&softs->os_specific.intrcookie[i]);
if (error) {
- DBG_ERR("Failed to setup interrupt for events r=%d\n",
+ DBG_ERR("Failed to setup interrupt for events r=%d\n",
error);
return error;
}
@@ -250,15 +264,12 @@ int register_msix_intr(pqisrc_softstate_t *softs)
&softs->os_specific.pqi_irq_rid[i],
RF_SHAREABLE | RF_ACTIVE);
if (NULL == softs->os_specific.pqi_irq[i]) {
- DBG_ERR("ERR : Failed to allocate \
- event interrupt resource\n");
- return PQI_STATUS_FAILURE;
+ DBG_ERR("Failed to allocate event interrupt resource\n");
+ return ENXIO;
}
-
-
+
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
-
error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i],
INTR_TYPE_CAM | INTR_MPSAFE,\
@@ -283,7 +294,7 @@ int register_msix_intr(pqisrc_softstate_t *softs)
if (NULL == softs->os_specific.pqi_irq[i]) {
DBG_ERR("Failed to allocate \
msi/x interrupt resource\n");
- return PQI_STATUS_FAILURE;
+ return ENXIO;
}
softs->os_specific.msi_ctx[i].pqi_dev = dev;
softs->os_specific.msi_ctx[i].oq_id = i;
@@ -311,38 +322,40 @@ int register_msix_intr(pqisrc_softstate_t *softs)
/*
* Setup interrupt depending on the configuration
*/
-int os_setup_intr(pqisrc_softstate_t *softs)
+int
+os_setup_intr(pqisrc_softstate_t *softs)
{
- int error = 0;
+ int bsd_status, pqi_status;
DBG_FUNC("IN\n");
if (softs->intr_type == INTR_TYPE_FIXED) {
- error = register_legacy_intr(softs);
+ bsd_status = register_legacy_intr(softs);
}
else {
- error = register_msix_intr(softs);
- }
- if (error) {
- DBG_FUNC("OUT failed error = %d\n", error);
- return error;
+ bsd_status = register_msix_intr(softs);
}
- DBG_FUNC("OUT error = %d\n", error);
+ if(bsd_status)
+ DBG_WARN("interrupt registration is failed, error = %d\n", bsd_status);
- return error;
+ pqi_status = bsd_status_to_pqi_status(bsd_status);
+
+ DBG_FUNC("OUT\n");
+
+ return pqi_status;
}
/*
* Deregistration of legacy interrupt
*/
-void deregister_pqi_intx(pqisrc_softstate_t *softs)
+void
+deregister_pqi_intx(pqisrc_softstate_t *softs)
{
- device_t dev;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
- dev = softs->os_specific.pqi_dev;
if (softs->os_specific.pqi_irq[0] != NULL) {
if (softs->os_specific.intr_registered[0]) {
bus_teardown_intr(dev, softs->os_specific.pqi_irq[0],
@@ -362,10 +375,10 @@ void deregister_pqi_intx(pqisrc_softstate_t *softs)
/*
* Deregistration of MSIx interrupt
*/
-void deregister_pqi_msix(pqisrc_softstate_t *softs)
+void
+deregister_pqi_msix(pqisrc_softstate_t *softs)
{
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
int msix_count = softs->intr_count;
int i = 0;
@@ -395,10 +408,10 @@ void deregister_pqi_msix(pqisrc_softstate_t *softs)
/*
* Function to destroy interrupts registered
*/
-int os_destroy_intr(pqisrc_softstate_t *softs)
+int
+os_destroy_intr(pqisrc_softstate_t *softs)
{
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
@@ -411,7 +424,7 @@ int os_destroy_intr(pqisrc_softstate_t *softs)
pci_release_msi(dev);
softs->os_specific.msi_enabled = FALSE;
}
-
+
DBG_FUNC("OUT\n");
return PQI_STATUS_SUCCESS;
@@ -420,10 +433,10 @@ int os_destroy_intr(pqisrc_softstate_t *softs)
/*
* Free interrupt related resources for the adapter
*/
-void os_free_intr_config(pqisrc_softstate_t *softs)
+void
+os_free_intr_config(pqisrc_softstate_t *softs)
{
- device_t dev;
- dev = softs->os_specific.pqi_dev;
+ device_t dev = softs->os_specific.pqi_dev;
DBG_FUNC("IN\n");
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.c b/sys/dev/smartpqi/smartpqi_ioctl.c
index 471d37c543d4..9f43f2e1faa2 100644
--- a/sys/dev/smartpqi/smartpqi_ioctl.c
+++ b/sys/dev/smartpqi/smartpqi_ioctl.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,7 +34,8 @@
/*
* Wrapper function to copy to user from kernel
*/
-int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
+int
+os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
void *src_buf, int size, int mode)
{
return(copyout(src_buf, dest_buf, size));
@@ -44,7 +44,8 @@ int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf,
/*
* Wrapper function to copy from user to kernel
*/
-int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
+int
+os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
void *src_buf, int size, int mode)
{
return(copyin(src_buf, dest_buf, size));
@@ -53,39 +54,38 @@ int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf,
/*
* Device open function for ioctl entry
*/
-static int smartpqi_open(struct cdev *cdev, int flags, int devtype,
+static int
+smartpqi_open(struct cdev *cdev, int flags, int devtype,
struct thread *td)
{
- int error = PQI_STATUS_SUCCESS;
-
- return error;
+ return BSD_SUCCESS;
}
/*
- * Device close function for ioctl entry
+ * Device close function for ioctl entry
*/
-static int smartpqi_close(struct cdev *cdev, int flags, int devtype,
+static int
+smartpqi_close(struct cdev *cdev, int flags, int devtype,
struct thread *td)
{
- int error = PQI_STATUS_SUCCESS;
-
- return error;
+ return BSD_SUCCESS;
}
/*
* ioctl for getting driver info
*/
-static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
+static void
+smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
pdriver_info driver_info = (pdriver_info)udata;
DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev);
- driver_info->major_version = PQISRC_DRIVER_MAJOR;
- driver_info->minor_version = PQISRC_DRIVER_MINOR;
- driver_info->release_version = PQISRC_DRIVER_RELEASE;
- driver_info->build_revision = PQISRC_DRIVER_REVISION;
+ driver_info->major_version = PQISRC_OS_VERSION;
+ driver_info->minor_version = PQISRC_FEATURE_VERSION;
+ driver_info->release_version = PQISRC_PATCH_VERSION;
+ driver_info->build_revision = PQISRC_BUILD_VERSION;
driver_info->max_targets = PQI_MAX_DEVICES - 1;
driver_info->max_io = softs->max_io_for_scsi_ml;
driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size;
@@ -96,7 +96,8 @@ static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev)
/*
* ioctl for getting controller info
*/
-static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
+static void
+smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
{
struct pqisrc_softstate *softs = cdev->si_drv1;
device_t dev = softs->os_specific.pqi_dev;
@@ -120,49 +121,62 @@ static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev)
DBG_FUNC("OUT\n");
}
+static inline int
+pqi_status_to_bsd_ioctl_status(int pqi_status)
+{
+ if (PQI_STATUS_SUCCESS == pqi_status)
+ return BSD_SUCCESS;
+ else
+ return EIO;
+}
+
/*
* ioctl entry point for user
*/
-static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
+static int
+smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata,
int flags, struct thread *td)
{
- int error = PQI_STATUS_SUCCESS;
+ int bsd_status, pqi_status;
struct pqisrc_softstate *softs = cdev->si_drv1;
DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev);
if (!udata) {
DBG_ERR("udata is null !!\n");
+ return EINVAL;
}
if (pqisrc_ctrl_offline(softs)){
- DBG_ERR("Controller s offline !!\n");
return ENOTTY;
}
switch (cmd) {
case CCISS_GETDRIVVER:
smartpqi_get_driver_info_ioctl(udata, cdev);
+ bsd_status = BSD_SUCCESS;
break;
case CCISS_GETPCIINFO:
smartpqi_get_pci_info_ioctl(udata, cdev);
+ bsd_status = BSD_SUCCESS;
break;
case SMARTPQI_PASS_THRU:
case CCISS_PASSTHRU:
- error = pqisrc_passthru_ioctl(softs, udata, 0);
- error = PQI_STATUS_SUCCESS;
+ pqi_status = pqisrc_passthru_ioctl(softs, udata, 0);
+ bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
break;
case CCISS_REGNEWD:
- error = pqisrc_scan_devices(softs);
+ pqi_status = pqisrc_scan_devices(softs);
+ bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status);
break;
default:
- DBG_WARN( "!IOCTL cmd 0x%lx not supported", cmd);
- error = ENOTTY;
+		DBG_WARN("!IOCTL cmd 0x%lx not supported\n", cmd);
+ bsd_status = ENOTTY;
break;
}
- DBG_FUNC("OUT error = %d\n", error);
- return error;
+ DBG_FUNC("OUT error = %d\n", bsd_status);
+ return bsd_status;
}
static struct cdevsw smartpqi_cdevsw =
@@ -177,9 +191,10 @@ static struct cdevsw smartpqi_cdevsw =
/*
* Function to create device node for ioctl
*/
-int create_char_dev(struct pqisrc_softstate *softs, int card_index)
+int
+create_char_dev(struct pqisrc_softstate *softs, int card_index)
{
- int error = PQI_STATUS_SUCCESS;
+ int error = BSD_SUCCESS;
DBG_FUNC("IN idx = %d\n", card_index);
@@ -189,17 +204,19 @@ int create_char_dev(struct pqisrc_softstate *softs, int card_index)
if(softs->os_specific.cdev) {
softs->os_specific.cdev->si_drv1 = softs;
} else {
- error = PQI_STATUS_FAILURE;
+ error = ENXIO;
}
DBG_FUNC("OUT error = %d\n", error);
+
return error;
}
/*
* Function to destroy device node for ioctl
*/
-void destroy_char_dev(struct pqisrc_softstate *softs)
+void
+destroy_char_dev(struct pqisrc_softstate *softs)
{
DBG_FUNC("IN\n");
if (softs->os_specific.cdev) {
@@ -229,7 +246,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
memset(&request, 0, sizeof(request));
memset(&error_info, 0, sizeof(error_info));
-
+
DBG_FUNC("IN");
if (pqisrc_ctrl_offline(softs))
@@ -238,7 +255,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
if (!arg)
return (PQI_STATUS_FAILURE);
- if (iocommand->buf_size < 1 &&
+ if (iocommand->buf_size < 1 &&
iocommand->Request.Type.Direction != PQIIOCTL_NONE)
return PQI_STATUS_FAILURE;
if (iocommand->Request.CDBLen > sizeof(request.cdb))
@@ -266,14 +283,15 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
ret = PQI_STATUS_FAILURE;
goto out;
}
-
+
DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr);
DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr);
drv_buf = (char *)ioctl_dma_buf.virt_addr;
if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) {
- if ((ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf,
- iocommand->buf_size, mode)) != 0) {
+ if ((ret = os_copy_from_user(softs, (void *)drv_buf,
+ (void *)iocommand->buf,
+ iocommand->buf_size, mode)) != 0) {
ret = PQI_STATUS_FAILURE;
goto free_mem;
}
@@ -281,9 +299,9 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
}
request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
- request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
+ request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) -
PQI_REQUEST_HEADER_LENGTH;
- memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
+ memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes,
sizeof(request.lun_number));
memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen);
request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
@@ -319,8 +337,11 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
request.request_id = tag;
request.response_queue_id = ob_q->q_id;
request.error_index = request.request_id;
- rcb = &softs->rcb[tag];
+ if (softs->timeout_in_passthrough) {
+ request.timeout_in_sec = iocommand->Request.Timeout;
+ }
+ rcb = &softs->rcb[tag];
rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
rcb->tag = tag;
@@ -332,7 +353,8 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
goto err_out;
}
- ret = pqisrc_wait_on_condition(softs, rcb);
+ ret = pqisrc_wait_on_condition(softs, rcb,
+ PQISRC_PASSTHROUGH_CMD_TIMEOUT);
if (ret != PQI_STATUS_SUCCESS) {
DBG_ERR("Passthru IOCTL cmd timed out !!\n");
goto err_out;
@@ -340,6 +362,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
memset(&iocommand->error_info, 0, sizeof(iocommand->error_info));
+
if (rcb->status) {
size_t sense_data_length;
@@ -350,7 +373,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
if (!sense_data_length)
sense_data_length = error_info.resp_data_len;
- if (sense_data_length &&
+ if (sense_data_length &&
(sense_data_length > sizeof(error_info.data)))
sense_data_length = sizeof(error_info.data);
@@ -364,22 +387,23 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
iocommand->error_info.SenseLen = sense_data_length;
}
- if (error_info.data_out_result ==
+ if (error_info.data_out_result ==
PQI_RAID_DATA_IN_OUT_UNDERFLOW){
rcb->status = REQUEST_SUCCESS;
}
}
- if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
+ if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 &&
(iocommand->Request.Type.Direction & PQIIOCTL_READ)) {
- if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
+
+ if ((ret = os_copy_to_user(softs, (void*)iocommand->buf,
(void*)drv_buf, iocommand->buf_size, mode)) != 0) {
- DBG_ERR("Failed to copy the response\n");
+ DBG_ERR("Failed to copy the response\n");
goto err_out;
}
}
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
if (iocommand->buf_size > 0)
os_dma_mem_free(softs,&ioctl_dma_buf);
@@ -387,7 +411,7 @@ pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode)
DBG_FUNC("OUT\n");
return ret;
err_out:
- os_reset_rcb(rcb);
+ os_reset_rcb(rcb);
pqisrc_put_tag(&softs->taglist, request.request_id);
free_mem:
diff --git a/sys/dev/smartpqi/smartpqi_ioctl.h b/sys/dev/smartpqi/smartpqi_ioctl.h
index 1c8c32ac64a3..42b091bcb491 100644
--- a/sys/dev/smartpqi/smartpqi_ioctl.h
+++ b/sys/dev/smartpqi/smartpqi_ioctl.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +31,7 @@
/* IOCTL passthrough macros and structures */
#define SENSEINFOBYTES 32 /* note that this value may vary
- between host implementations */
+ between host implementations */
/* transfer direction */
#define PQIIOCTL_NONE 0x00
@@ -40,12 +39,14 @@
#define PQIIOCTL_READ 0x02
#define PQIIOCTL_BIDIRECTIONAL (PQIIOCTL_READ | PQIIOCTL_WRITE)
+
/* Type defs used in the following structs */
#define BYTE uint8_t
#define WORD uint16_t
#define HWORD uint16_t
#define DWORD uint32_t
+
/* Command List Structure */
typedef union _SCSI3Addr_struct {
struct {
@@ -72,14 +73,14 @@ typedef struct _PhysDevAddr_struct {
DWORD Bus:6;
DWORD Mode:2;
SCSI3Addr_struct Target[2]; /* 2 level target device addr */
-
+
}OS_ATTRIBUTE_PACKED PhysDevAddr_struct;
typedef struct _LogDevAddr_struct {
DWORD VolId:30;
DWORD Mode:2;
BYTE reserved[4];
-
+
}OS_ATTRIBUTE_PACKED LogDevAddr_struct;
typedef union _LUNAddr_struct {
@@ -91,7 +92,7 @@ typedef union _LUNAddr_struct {
}OS_ATTRIBUTE_PACKED LUNAddr_struct;
typedef struct _RequestBlock_struct {
- BYTE CDBLen;
+ BYTE CDBLen;
struct {
BYTE Type:3;
BYTE Attribute:3;
@@ -100,13 +101,13 @@ typedef struct _RequestBlock_struct {
HWORD Timeout;
BYTE CDB[16];
-}OS_ATTRIBUTE_PACKED RequestBlock_struct;
+}OS_ATTRIBUTE_PACKED RequestBlock_struct;
typedef union _MoreErrInfo_struct{
struct {
- BYTE Reserved[3];
- BYTE Type;
- DWORD ErrorInfo;
+ BYTE Reserved[3];
+ BYTE Type;
+ DWORD ErrorInfo;
} Common_Info;
struct{
BYTE Reserved[2];
@@ -127,13 +128,15 @@ typedef struct _ErrorInfo_struct {
}OS_ATTRIBUTE_PACKED ErrorInfo_struct;
+
typedef struct pqi_ioctl_passthruCmd_struct {
LUNAddr_struct LUN_info;
RequestBlock_struct Request;
- ErrorInfo_struct error_info;
+ ErrorInfo_struct error_info;
WORD buf_size; /* size in bytes of the buf */
passthru_buf_type_t buf;
}OS_ATTRIBUTE_PACKED IOCTL_Command_struct;
+
#endif /* _PQI_IOCTL_H_ */
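For orientation, a hypothetical userland caller of the passthrough interface this header defines, issuing a 6-byte SCSI INQUIRY through SMARTPQI_PASS_THRU. The file descriptor is assumed to come from opening the driver's character device (created by create_char_dev()), and the direct assignment to the buf member assumes passthru_buf_type_t is pointer-compatible; both are illustration-only assumptions. Per the driver code above, the Timeout field is forwarded to the controller only when softs->timeout_in_passthrough is set.

#include <sys/ioctl.h>
#include <string.h>

/* assumes smartpqi_ioctl.h (above) has been included */
static int
send_inquiry(int fd, void *buf, unsigned char len)
{
	IOCTL_Command_struct cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.Request.CDBLen = 6;
	cmd.Request.Type.Direction = PQIIOCTL_READ;
	cmd.Request.Timeout = 30;	/* seconds */
	cmd.Request.CDB[0] = 0x12;	/* SCSI INQUIRY */
	cmd.Request.CDB[4] = len;	/* allocation length */
	cmd.buf_size = len;
	cmd.buf = buf;			/* assumption: assignable from void * */
	return (ioctl(fd, SMARTPQI_PASS_THRU, &cmd));
}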
diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c
index fd86dff75a1b..b4efa26b9784 100644
--- a/sys/dev/smartpqi/smartpqi_main.c
+++ b/sys/dev/smartpqi/smartpqi_main.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,6 +32,8 @@
#include "smartpqi_includes.h"
#include "smartpqi_prototypes.h"
+CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS);
+
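The CTASSERT added here pins down the assumption the status rework leans on: BSD_SUCCESS and PQI_STATUS_SUCCESS must both be zero, so code paths that return either constant stay interchangeable. A standalone sketch of the same compile-time guard (constants are stand-ins; the kernel macro expands to an equivalent construct):

#define BSD_SUCCESS		0	/* stand-in */
#define PQI_STATUS_SUCCESS	0	/* stand-in */

/* the build fails here, rather than at run time, if the domains diverge */
_Static_assert(BSD_SUCCESS == PQI_STATUS_SUCCESS,
    "BSD and PQI success codes must agree");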
/*
* Supported devices
*/
@@ -46,55 +47,69 @@ struct pqi_ident
char *desc;
} pqi_identifiers[] = {
/* (MSCC PM8205 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"},
{0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"},
- {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
- {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
+ {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"},
+ {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"},
/* (MSCC PM8225 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"},
/* (MSCC PM8221 8x12G based) */
- {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
- {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"},
+ {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"},
+ {0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"},
+ {0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"},
+
/* (MSCC PM8204 8x12G based) */
- {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
- {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
- {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
- {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
- {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
- {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
- {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
- {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
+ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"},
+ {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"},
+ {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"},
+ {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"},
+ {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"},
+ {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, "SmartRAID 3102-8i"},
+ {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"},
+ {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"},
{0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"},
{0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"},
{0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"},
- {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR RAID PM8204-2GB"},
- {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR RAID PM8204-4GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR PM8204-2GB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR PM8204-4GB"},
+ {0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"},
+ {0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"},
+ {0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"},
+ {0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"},
+ {0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"},
+ {0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"},
/* (MSCC PM8222 8x12G based) */
- {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
- {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
- {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
- {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
- {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
- {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
- {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
- {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
- {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
- {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
+ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"},
+ {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"},
+ {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"},
+ {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"},
+ {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"},
+ {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"},
+ {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"},
{0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"},
{0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"},
- {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR SMART-HBA PM8222-SHBA"},
+ {0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"},
+ {0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"},
+ {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR PM8222-SHBA"},
{0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"},
+ {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "INSPUR PM8222-HBA"},
+ {0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"},
/* (SRCx MSCC FVB 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"},
@@ -114,6 +129,7 @@ struct pqi_ident
{0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"},
{0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"},
{0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "INSPUR RAID 8236-16i"},
+ {0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"},
/* (MSCC PM8237 24x12G based) */
{0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"},
@@ -126,6 +142,8 @@ struct pqi_ident
{0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"},
{0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8238-16i"},
{0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"},
+ {0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"},
+ {0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"},
/* (MSCC PM8240 24x12G based) */
{0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"},
@@ -133,6 +151,15 @@ struct pqi_ident
{0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"},
{0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"},
{0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "INSPUR RAID 8240-24i"},
+
+	/* Huawei IDs */
+ {0x9005, 0x028f, 0x19e5, 0xd227, PQI_HWIF_SRCV, "SR465C-M 4G"},
+ {0x9005, 0x028f, 0x19e5, 0xd22a, PQI_HWIF_SRCV, "SR765-M"},
+ {0x9005, 0x028f, 0x19e5, 0xd228, PQI_HWIF_SRCV, "SR455C-M 2G"},
+ {0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"},
+ {0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, "SR155-M"},
+ {0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"},
+
{0, 0, 0, 0, 0, 0}
};
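The table is sentinel-terminated (the all-zero last entry); probe code can walk it comparing all four PCI IDs, with the vendor/device-only pqi_family_identifiers table as a fallback. A minimal userland sketch of the exact-match lookup such a table supports (the struct and the one-entry table are stand-ins):

#include <stdint.h>
#include <stdio.h>

struct pqi_ident {
	uint16_t	vendor, device, subvendor, subdevice;
	int		hwif;
	const char	*desc;
};

/* stand-in table; like the driver's, it ends in an all-zero sentinel */
static const struct pqi_ident idents[] = {
	{0x9005, 0x028f, 0x9005, 0x800, 1, "SmartRAID 3154-8i"},
	{0, 0, 0, 0, 0, NULL}
};

static const struct pqi_ident *
find_ident(uint16_t ven, uint16_t dev, uint16_t sven, uint16_t sdev)
{
	const struct pqi_ident *m;

	for (m = idents; m->vendor != 0; m++)
		if (m->vendor == ven && m->device == dev &&
		    m->subvendor == sven && m->subdevice == sdev)
			return (m);
	return (NULL);
}

int
main(void)
{
	const struct pqi_ident *id;

	id = find_ident(0x9005, 0x028f, 0x9005, 0x800);
	printf("%s\n", id != NULL ? id->desc : "not found");
	return (0);
}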
@@ -145,8 +172,8 @@ pqi_family_identifiers[] = {
/*
* Function to identify the installed adapter.
*/
-static struct pqi_ident *
-pqi_find_ident(device_t dev)
+static struct
+pqi_ident *pqi_find_ident(device_t dev)
{
struct pqi_ident *m;
u_int16_t vendid, devid, sub_vendid, sub_devid;
@@ -182,7 +209,7 @@ smartpqi_probe(device_t dev)
struct pqi_ident *id;
if ((id = pqi_find_ident(dev)) != NULL) {
- device_set_desc(dev, id->desc);
+ device_set_desc(dev, id->desc);
return(BUS_PROBE_VENDOR);
}
@@ -192,15 +219,17 @@ smartpqi_probe(device_t dev)
/*
* Store Bus/Device/Function in softs
*/
-void pqisrc_save_controller_info(struct pqisrc_softstate *softs)
+void
+pqisrc_save_controller_info(struct pqisrc_softstate *softs)
{
device_t dev = softs->os_specific.pqi_dev;
softs->bus_id = (uint32_t)pci_get_bus(dev);
softs->device_id = (uint32_t)pci_get_device(dev);
- softs->func_id = (uint32_t)pci_get_function(dev);
+ softs->func_id = (uint32_t)pci_get_function(dev);
}
+
/*
* Allocate resources for our device, set up the bus interface.
* Initialize the PQI related functionality, scan devices, register sim to
@@ -211,7 +240,7 @@ smartpqi_attach(device_t dev)
{
struct pqisrc_softstate *softs = NULL;
struct pqi_ident *id = NULL;
- int error = 0;
+ int error = BSD_SUCCESS;
u_int32_t command = 0, i = 0;
int card_index = device_get_unit(dev);
rcb_t *rcbp = NULL;
@@ -234,7 +263,7 @@ smartpqi_attach(device_t dev)
/* assume failure is 'not configured' */
error = ENXIO;
- /*
+ /*
* Verify that the adapter is correctly set up in PCI space.
*/
pci_enable_busmaster(softs->os_specific.pqi_dev);
@@ -245,16 +274,21 @@ smartpqi_attach(device_t dev)
goto out;
}
- /*
+ /*
* Detect the hardware interface version, set up the bus interface
* indirection.
*/
id = pqi_find_ident(dev);
+ if (!id) {
+ DBG_ERR("NULL return value from pqi_find_ident\n");
+ goto out;
+ }
+
softs->os_specific.pqi_hwif = id->hwif;
switch(softs->os_specific.pqi_hwif) {
case PQI_HWIF_SRCV:
- DBG_INFO("set hardware up for PMC SRCv for %p", softs);
+ DBG_INFO("set hardware up for PMC SRCv for %p\n", softs);
break;
default:
softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN;
@@ -288,7 +322,7 @@ smartpqi_attach(device_t dev)
/*
* Allocate the parent bus DMA tag appropriate for our PCI interface.
- *
+ *
* Note that some of these controllers are 64-bit capable.
*/
if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
@@ -313,27 +347,31 @@ smartpqi_attach(device_t dev)
/* Initialize the PQI library */
error = pqisrc_init(softs);
- if (error) {
+ if (error != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to initialize pqi lib error = %d\n", error);
- error = PQI_STATUS_FAILURE;
+ error = ENXIO;
goto out;
}
+ else {
+ error = BSD_SUCCESS;
+ }
+
+ mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
+ softs->os_specific.mtx_init = TRUE;
+ mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
+
+ callout_init(&softs->os_specific.wellness_periodic, 1);
+ callout_init(&softs->os_specific.heartbeat_timeout_id, 1);
- mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF);
- softs->os_specific.mtx_init = TRUE;
- mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF);
- callout_init(&softs->os_specific.wellness_periodic, 1);
- callout_init(&softs->os_specific.heartbeat_timeout_id, 1);
-
- /*
- * Create DMA tag for mapping buffers into controller-addressable space.
- */
- if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
- 1, 0, /* algnmnt, boundary */
+ /*
+ * Create DMA tag for mapping buffers into controller-addressable space.
+ */
+ if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */
+ PAGE_SIZE, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- softs->pqi_cap.max_sg_elem*PAGE_SIZE,/*maxsize*/
+ (bus_size_t)softs->pqi_cap.max_sg_elem*PAGE_SIZE,/* maxsize */
softs->pqi_cap.max_sg_elem, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
@@ -348,40 +386,37 @@ smartpqi_attach(device_t dev)
for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) {
if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) {
DBG_ERR("Cant create datamap for buf @"
- "rcbp = %p maxio = %d error = %d\n",
+ "rcbp = %p maxio = %d error = %d\n",
rcbp, softs->pqi_cap.max_outstanding_io, error);
goto dma_out;
}
}
os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */
- callout_reset(&softs->os_specific.wellness_periodic, 120*hz,
- os_wellness_periodic, softs);
- /* Register our shutdown handler. */
- softs->os_specific.eh = EVENTHANDLER_REGISTER(shutdown_final,
- smartpqi_shutdown, softs, SHUTDOWN_PRI_DEFAULT);
+ callout_reset(&softs->os_specific.wellness_periodic, 120 * hz,
+ os_wellness_periodic, softs);
error = pqisrc_scan_devices(softs);
- if (error) {
+ if (error != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to scan lib error = %d\n", error);
- error = PQI_STATUS_FAILURE;
+ error = ENXIO;
goto out;
}
error = register_sim(softs, card_index);
if (error) {
- DBG_ERR("Failed to register sim index = %d error = %d\n",
+ DBG_ERR("Failed to register sim index = %d error = %d\n",
card_index, error);
goto out;
}
- smartpqi_target_rescan(softs);
+ smartpqi_target_rescan(softs);
TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs);
error = create_char_dev(softs, card_index);
if (error) {
- DBG_ERR("Failed to register character device index=%d r=%d\n",
+ DBG_ERR("Failed to register character device index=%d r=%d\n",
card_index, error);
goto out;
}
@@ -390,7 +425,7 @@ smartpqi_attach(device_t dev)
dma_out:
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY,
- softs->os_specific.pqi_regs_rid0,
+ softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
out:
DBG_FUNC("OUT error = %d\n", error);
@@ -403,25 +438,35 @@ out:
static int
smartpqi_detach(device_t dev)
{
- struct pqisrc_softstate *softs = NULL;
- softs = device_get_softc(dev);
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+ int rval = BSD_SUCCESS;
+
DBG_FUNC("IN\n");
- EVENTHANDLER_DEREGISTER(shutdown_final, softs->os_specific.eh);
+ if (softs == NULL)
+ return ENXIO;
/* kill the periodic event */
callout_drain(&softs->os_specific.wellness_periodic);
/* Kill the heart beat event */
callout_drain(&softs->os_specific.heartbeat_timeout_id);
- smartpqi_shutdown(softs);
+ if (!pqisrc_ctrl_offline(softs)) {
+ rval = pqisrc_flush_cache(softs, PQISRC_NONE_CACHE_FLUSH_ONLY);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to flush adapter cache! rval = %d\n", rval);
+ rval = EIO;
+ }
+ }
+
destroy_char_dev(softs);
pqisrc_uninit(softs);
deregister_sim(softs);
pci_release_msi(dev);
DBG_FUNC("OUT\n");
- return 0;
+
+ return rval;
}
/*
@@ -430,15 +475,19 @@ smartpqi_detach(device_t dev)
static int
smartpqi_suspend(device_t dev)
{
- struct pqisrc_softstate *softs;
- softs = device_get_softc(dev);
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return ENXIO;
+
DBG_INFO("Suspending the device %p\n", softs);
softs->os_specific.pqi_state |= SMART_STATE_SUSPEND;
DBG_FUNC("OUT\n");
- return(0);
+
+ return BSD_SUCCESS;
}
/*
@@ -447,37 +496,47 @@ smartpqi_suspend(device_t dev)
static int
smartpqi_resume(device_t dev)
{
- struct pqisrc_softstate *softs;
- softs = device_get_softc(dev);
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+
DBG_FUNC("IN\n");
+ if (softs == NULL)
+ return ENXIO;
+
softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND;
DBG_FUNC("OUT\n");
- return(0);
+
+ return BSD_SUCCESS;
}
/*
* Do whatever is needed during a system shutdown.
*/
-int
-smartpqi_shutdown(void *arg)
+static int
+smartpqi_shutdown(device_t dev)
{
- struct pqisrc_softstate *softs = NULL;
- int rval = 0;
+ struct pqisrc_softstate *softs = device_get_softc(dev);
+ int bsd_status = BSD_SUCCESS;
+ int pqi_status;
DBG_FUNC("IN\n");
- softs = (struct pqisrc_softstate *)arg;
+ if (softs == NULL)
+ return ENXIO;
+
+ if (pqisrc_ctrl_offline(softs))
+ return BSD_SUCCESS;
- rval = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN);
- if (rval != PQI_STATUS_SUCCESS) {
- DBG_ERR("Unable to flush adapter cache! rval = %d", rval);
+ pqi_status = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN);
+ if (pqi_status != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to flush adapter cache! rval = %d\n", pqi_status);
+ bsd_status = EIO;
}
DBG_FUNC("OUT\n");
- return rval;
+ return bsd_status;
}
/*
@@ -490,10 +549,12 @@ static device_method_t pqi_methods[] = {
DEVMETHOD(device_detach, smartpqi_detach),
DEVMETHOD(device_suspend, smartpqi_suspend),
DEVMETHOD(device_resume, smartpqi_resume),
+ DEVMETHOD(device_shutdown, smartpqi_shutdown),
{ 0, 0 }
};
-static devclass_t pqi_devclass;
+static devclass_t pqi_devclass;
+
static driver_t smartpqi_pci_driver = {
"smartpqi",
pqi_methods,
diff --git a/sys/dev/smartpqi/smartpqi_mem.c b/sys/dev/smartpqi/smartpqi_mem.c
index 30dcc52f1506..1ed2a582e574 100644
--- a/sys/dev/smartpqi/smartpqi_mem.c
+++ b/sys/dev/smartpqi/smartpqi_mem.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,7 +27,7 @@
#include "smartpqi_includes.h"
-MALLOC_DEFINE(M_SMARTPQI, "smartpqi", "Buffers for the smartpqi(4) driver");
+MALLOC_DEFINE(M_SMARTPQI, "smartpqi", "Buffers for the smartpqi driver");
/*
* DMA map load callback function
@@ -40,15 +39,42 @@ os_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
*paddr = segs[0].ds_addr;
}
+int
+os_dma_setup(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+int
+os_dma_destroy(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+ return PQI_STATUS_SUCCESS;
+}
+
+void
+os_update_dma_attributes(pqisrc_softstate_t *softs)
+{
+ DBG_FUNC("IN\n");
+ DBG_FUNC("OUT\n");
+}
+
/*
* DMA mem resource allocation wrapper function
*/
-int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
+int
+os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
{
- int ret = 0;
+ int ret = BSD_SUCCESS;
/* DBG_FUNC("IN\n"); */
+ /* Make sure the alignment is at least 4 bytes */
+ ASSERT(dma_mem->align >= 4);
+
/* DMA memory needed - allocate it */
if ((ret = bus_dma_tag_create(
softs->os_specific.pqi_parent_dmat, /* parent */
@@ -65,14 +91,21 @@ int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
DBG_ERR("can't allocate DMA tag with error = 0x%x\n", ret);
goto err_out;
}
+
+ if (!dma_mem->dma_tag) {
+ DBG_ERR("dma tag is NULL\n");
+ ret = ENOMEM;
+ goto err_out;
+ }
+
if ((ret = bus_dmamem_alloc(dma_mem->dma_tag, (void **)&dma_mem->virt_addr,
- BUS_DMA_NOWAIT, &dma_mem->dma_map)) != 0) {
+ BUS_DMA_WAITOK, &dma_mem->dma_map)) != 0) {
DBG_ERR("can't allocate DMA memory for required object \
with error = 0x%x\n", ret);
goto err_mem;
}
- if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
+ if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
dma_mem->virt_addr, dma_mem->size,
os_dma_map, &dma_mem->dma_addr, 0)) != 0) {
DBG_ERR("can't load DMA memory for required \
@@ -82,25 +115,31 @@ int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
memset(dma_mem->virt_addr, 0, dma_mem->size);
+ ret = bsd_status_to_pqi_status(ret);
+
/* DBG_FUNC("OUT\n"); */
return ret;
err_load:
if(dma_mem->virt_addr)
- bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
+ bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr,
dma_mem->dma_map);
err_mem:
if(dma_mem->dma_tag)
bus_dma_tag_destroy(dma_mem->dma_tag);
err_out:
DBG_FUNC("failed OUT\n");
+
+ ret = bsd_status_to_pqi_status(ret);
+
return ret;
}
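Caller's view of the reworked allocator: describe the buffer in a struct dma_mem, get back zero-filled memory with both virtual and DMA addresses, and test the result in the PQI status domain. A hedged sketch (hypothetical helper; the 4 KiB size is arbitrary):

static int
alloc_example(pqisrc_softstate_t *softs)
{
	struct dma_mem buf;

	memset(&buf, 0, sizeof(buf));
	buf.size = 4096;		/* arbitrary */
	buf.align = 4;			/* the allocator now asserts align >= 4 */
	if (os_dma_mem_alloc(softs, &buf) != PQI_STATUS_SUCCESS)
		return (PQI_STATUS_FAILURE);
	/* buf.virt_addr and buf.dma_addr are valid; the memory is zeroed */
	os_dma_mem_free(softs, &buf);
	return (PQI_STATUS_SUCCESS);
}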
/*
* DMA mem resource deallocation wrapper function
*/
-void os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
+void
+os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
{
/* DBG_FUNC("IN\n"); */
@@ -123,19 +162,21 @@ void os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem)
/* DBG_FUNC("OUT\n"); */
}
+
/*
* Mem resource allocation wrapper function
*/
-void *os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
+void
+*os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
{
void *addr = NULL;
- /* DBG_FUNC("IN\n"); */
+ /* DBG_FUNC("IN\n"); */
addr = malloc((unsigned long)size, M_SMARTPQI,
M_NOWAIT | M_ZERO);
-/* DBG_FUNC("OUT\n"); */
+ /* DBG_FUNC("OUT\n"); */
return addr;
}
@@ -143,8 +184,8 @@ void *os_mem_alloc(pqisrc_softstate_t *softs, size_t size)
/*
* Mem resource deallocation wrapper function
*/
-void os_mem_free(pqisrc_softstate_t *softs,
- char *addr, size_t size)
+void
+os_mem_free(pqisrc_softstate_t *softs, char *addr, size_t size)
{
/* DBG_FUNC("IN\n"); */
@@ -156,14 +197,15 @@ void os_mem_free(pqisrc_softstate_t *softs,
/*
* dma/bus resource deallocation wrapper function
*/
-void os_resource_free(pqisrc_softstate_t *softs)
+void
+os_resource_free(pqisrc_softstate_t *softs)
{
if(softs->os_specific.pqi_parent_dmat)
bus_dma_tag_destroy(softs->os_specific.pqi_parent_dmat);
if (softs->os_specific.pqi_regs_res0 != NULL)
bus_release_resource(softs->os_specific.pqi_dev,
- SYS_RES_MEMORY,
+ SYS_RES_MEMORY,
softs->os_specific.pqi_regs_rid0,
softs->os_specific.pqi_regs_res0);
}
diff --git a/sys/dev/smartpqi/smartpqi_misc.c b/sys/dev/smartpqi/smartpqi_misc.c
index 19cfcac01beb..632b398f1019 100644
--- a/sys/dev/smartpqi/smartpqi_misc.c
+++ b/sys/dev/smartpqi/smartpqi_misc.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,9 +28,10 @@
#include "smartpqi_includes.h"
/*
- * Populate hostwell time variables in bcd format from FreeBSD format
+ * Populate hostwellness time variables in BCD format from FreeBSD time
*/
-void os_get_time(struct bmic_host_wellness_time *host_wellness_time)
+void
+os_get_time(struct bmic_host_wellness_time *host_wellness_time)
{
struct timespec ts;
struct clocktime ct;
@@ -49,13 +49,14 @@ void os_get_time(struct bmic_host_wellness_time *host_wellness_time)
host_wellness_time->century = (uint8_t)bin2bcd(ct.year / 100);
host_wellness_time->year = (uint8_t)bin2bcd(ct.year % 100);
-}
+}
/*
* Update host time to f/w every 24 hours in a periodic timer.
*/
-void os_wellness_periodic(void *data)
+void
+os_wellness_periodic(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
int ret = 0;
@@ -67,14 +68,15 @@ void os_wellness_periodic(void *data)
}
/* reschedule ourselves */
- callout_schedule(&softs->os_specific.wellness_periodic,
- OS_HOST_WELLNESS_TIMEOUT * hz);
+ callout_reset(&softs->os_specific.wellness_periodic,
+ PQI_HOST_WELLNESS_TIMEOUT_SEC * hz, os_wellness_periodic, softs);
}
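The switch from callout_schedule() to callout_reset() makes the handler and its argument explicit on every re-arm, instead of relying on whatever the callout was last configured with. The self-rearming shape, reduced to a sketch (the softc layout and 120-second period are stand-ins):

struct my_softc {
	struct callout	periodic;
};

static void
periodic_handler(void *data)
{
	struct my_softc *sc = data;

	/* ... do the periodic work ... */

	/* re-arm: callout_reset() binds handler and argument each time */
	callout_reset(&sc->periodic, 120 * hz, periodic_handler, sc);
}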
/*
* Routine used to stop the heart-beat timer
*/
-void os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
+void
+os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
@@ -87,7 +89,8 @@ void os_stop_heartbeat_timer(pqisrc_softstate_t *softs)
/*
* Routine used to start the heart-beat timer
*/
-void os_start_heartbeat_timer(void *data)
+void
+os_start_heartbeat_timer(void *data)
{
struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data;
DBG_FUNC("IN\n");
@@ -95,8 +98,8 @@ void os_start_heartbeat_timer(void *data)
pqisrc_heartbeat_timer_handler(softs);
if (!pqisrc_ctrl_offline(softs)) {
callout_reset(&softs->os_specific.heartbeat_timeout_id,
- OS_FW_HEARTBEAT_TIMER_INTERVAL * hz,
- os_start_heartbeat_timer, softs);
+ PQI_HEARTBEAT_TIMEOUT_SEC * hz,
+ os_start_heartbeat_timer, softs);
}
DBG_FUNC("OUT\n");
@@ -105,48 +108,49 @@ void os_start_heartbeat_timer(void *data)
/*
* Mutex initialization function
*/
-int os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
+int
+os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock,
char *lockname)
{
- mtx_init(lock, lockname, NULL, MTX_SPIN);
- return 0;
-
+ mtx_init(lock, lockname, NULL, MTX_SPIN);
+ return 0;
}
/*
* Mutex uninitialization function
*/
-void os_uninit_spinlock(struct mtx *lock)
+void
+os_uninit_spinlock(struct mtx *lock)
{
- mtx_destroy(lock);
- return;
-
+ mtx_destroy(lock);
+ return;
}
/*
* Semaphore initialization function
*/
-int os_create_semaphore(const char *name, int value, struct sema *sema)
+int
+os_create_semaphore(const char *name, int value, struct sema *sema)
{
- sema_init(sema, value, name);
- return PQI_STATUS_SUCCESS;
-
+ sema_init(sema, value, name);
+ return PQI_STATUS_SUCCESS;
}
/*
* Semaphore uninitialization function
*/
-int os_destroy_semaphore(struct sema *sema)
+int
+os_destroy_semaphore(struct sema *sema)
{
- sema_destroy(sema);
- return PQI_STATUS_SUCCESS;
-
+ sema_destroy(sema);
+ return PQI_STATUS_SUCCESS;
}
/*
* Semaphore grab function
*/
-void inline os_sema_lock(struct sema *sema)
+void inline
+os_sema_lock(struct sema *sema)
{
sema_post(sema);
}
@@ -154,7 +158,8 @@ void inline os_sema_lock(struct sema *sema)
/*
* Semaphore release function
*/
-void inline os_sema_unlock(struct sema *sema)
+void inline
+os_sema_unlock(struct sema *sema)
{
sema_wait(sema);
}
@@ -162,7 +167,17 @@ void inline os_sema_unlock(struct sema *sema)
/*
* string copy wrapper function
*/
-int os_strlcpy(char *dst, char *src, int size)
+int
+os_strlcpy(char *dst, char *src, int size)
{
return strlcpy(dst, src, size);
}
+
+int
+bsd_status_to_pqi_status(int bsd_status)
+{
+ if (bsd_status == BSD_SUCCESS)
+ return PQI_STATUS_SUCCESS;
+ else
+ return PQI_STATUS_FAILURE;
+}
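bsd_status_to_pqi_status() is the inverse boundary conversion: any nonzero errno collapses to the single PQI failure code, which is how os_dma_mem_alloc() in smartpqi_mem.c reports bus_dma errors back into the shared library code. A quick standalone check of that behavior (constants are stand-ins):

#include <assert.h>
#include <errno.h>

#define BSD_SUCCESS		0	/* stand-in */
#define PQI_STATUS_SUCCESS	0	/* stand-in */
#define PQI_STATUS_FAILURE	1	/* stand-in */

static int
bsd_status_to_pqi_status(int bsd_status)
{
	return ((bsd_status == BSD_SUCCESS) ?
	    PQI_STATUS_SUCCESS : PQI_STATUS_FAILURE);
}

int
main(void)
{
	/* every nonzero errno maps to the one failure code */
	assert(bsd_status_to_pqi_status(0) == PQI_STATUS_SUCCESS);
	assert(bsd_status_to_pqi_status(ENOMEM) == PQI_STATUS_FAILURE);
	assert(bsd_status_to_pqi_status(EIO) == PQI_STATUS_FAILURE);
	return (0);
}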
diff --git a/sys/dev/smartpqi/smartpqi_prototypes.h b/sys/dev/smartpqi/smartpqi_prototypes.h
index 692a471a6d13..34b10c5941e8 100644
--- a/sys/dev/smartpqi/smartpqi_prototypes.h
+++ b/sys/dev/smartpqi/smartpqi_prototypes.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -38,8 +37,8 @@ void pqisrc_pqi_uninit(pqisrc_softstate_t *);
int pqisrc_process_config_table(pqisrc_softstate_t *);
int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type);
int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *);
-int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *);
-void pqisrc_complete_internal_cmds(pqisrc_softstate_t *);
+inline boolean_t pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *,
+ struct pqi_conf_table_firmware_features *, uint16_t );
/* pqi_sis.c*/
int pqisrc_sis_init(pqisrc_softstate_t *);
@@ -53,6 +52,7 @@ int pqisrc_force_sis(pqisrc_softstate_t *);
int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t);
void sis_disable_interrupt(pqisrc_softstate_t*);
+
/* pqi_queue.c */
int pqisrc_submit_admin_req(pqisrc_softstate_t *,
gen_adm_req_iu_t *, gen_adm_resp_iu_t *);
@@ -90,11 +90,16 @@ void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device);
void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs);
int pqisrc_alloc_tid(pqisrc_softstate_t *softs);
void pqisrc_free_tid(pqisrc_softstate_t *softs, int);
+int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
+ reportlun_data_ext_t **buff, size_t *data_length);
+int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
+ uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len);
/* pqi_helper.c */
boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *);
void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *);
-int pqisrc_wait_on_condition(pqisrc_softstate_t *, rcb_t *);
+int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb,
+ uint32_t timeout);
boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *);
boolean_t pqisrc_is_hba_lunid(uint8_t *);
boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *);
@@ -104,6 +109,14 @@ boolean_t pqisrc_scsi3addr_equal(uint8_t *, uint8_t *);
void check_struct_sizes(void);
char *pqisrc_raidlevel_to_string(uint8_t);
void pqisrc_configure_legacy_intx(pqisrc_softstate_t*, boolean_t);
+void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *);
+void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+void check_device_pending_commands_to_complete(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+
/* pqi_response.c */
void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb);
@@ -113,32 +126,40 @@ void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_io_response_success(pqisrc_softstate_t *,
rcb_t *);
+void pqisrc_show_sense_data_full(pqisrc_softstate_t *, rcb_t *, sense_data_u_t *sense_data);
void pqisrc_process_aio_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_raid_response_error(pqisrc_softstate_t *,
rcb_t *, uint16_t);
void pqisrc_process_response_queue(pqisrc_softstate_t *, int);
+
+
/* pqi_request.c */
+int pqisrc_build_send_vendor_request(pqisrc_softstate_t*,
+ pqi_vendor_general_request_t *,
+ raid_path_error_info_elem_t *);
int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *);
+
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t*);
+
int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *,
- rcb_t *, int, int);
+ rcb_t *, rcb_t *, int);
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs);
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs);
+void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags);
+char *io_path_to_ascii(IO_PATH_T path);
/* pqi_event.c*/
int pqisrc_report_event_config(pqisrc_softstate_t *);
int pqisrc_set_event_config(pqisrc_softstate_t *);
int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int);
void pqisrc_ack_all_events(void *arg);
+void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs);
-void pqisrc_event_worker(void *, int);
-int pqisrc_scsi_setup(struct pqisrc_softstate *);
-void pqisrc_scsi_cleanup(struct pqisrc_softstate *);
boolean_t pqisrc_update_scsi_sense(const uint8_t *, int,
struct sense_header_scsi *);
int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *,
@@ -188,13 +209,24 @@ int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *);
int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *);
int pqisrc_process_task_management_response(pqisrc_softstate_t *,
pqi_tmf_resp_t *);
-void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs);
+
+/*Device outstanding Io count*/
+uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+void pqisrc_init_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
+uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *,
+ pqi_scsi_dev_t *);
/* pqi_ioctl.c*/
int
pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int);
+
+
/* Functions Prototypes */
/* FreeBSD_mem.c */
int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *);
@@ -202,6 +234,9 @@ void os_dma_mem_free(pqisrc_softstate_t *,struct dma_mem *);
void *os_mem_alloc(pqisrc_softstate_t *,size_t);
void os_mem_free(pqisrc_softstate_t *,char *,size_t);
void os_resource_free(pqisrc_softstate_t *);
+int os_dma_setup(pqisrc_softstate_t *);
+int os_dma_destroy(pqisrc_softstate_t *);
+void os_update_dma_attributes(pqisrc_softstate_t *);
/* FreeBSD intr.c */
int os_get_intr_config(pqisrc_softstate_t *);
@@ -217,7 +252,7 @@ int os_copy_from_user(struct pqisrc_softstate *, void *,
void *, int, int);
int create_char_dev(struct pqisrc_softstate *, int);
void destroy_char_dev(struct pqisrc_softstate *);
-
+
/* FreeBSD_misc.c*/
int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *);
void os_uninit_spinlock(struct mtx *);
@@ -233,16 +268,18 @@ void os_start_heartbeat_timer(void *);
/* FreeBSD_cam.c */
uint8_t os_get_task_attr(rcb_t *);
-void os_wellness_periodic(void *);
void smartpqi_target_rescan(struct pqisrc_softstate *);
+void os_rescan_target(struct pqisrc_softstate *, pqi_scsi_dev_t *);
/* FreeBSD_intr.c FreeBSD_main.c */
+void pqisrc_event_worker(void *, int);
void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *);
void os_io_response_success(rcb_t *);
void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *);
void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t );
void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *);
+void os_wellness_periodic(void *);
void os_reset_rcb( rcb_t *);
int register_sim(struct pqisrc_softstate *, int);
void deregister_sim(struct pqisrc_softstate *);
@@ -255,6 +292,8 @@ void deregister_pqi_msix(pqisrc_softstate_t *);
void os_get_time(struct bmic_host_wellness_time *);
void os_eventtaskqueue_enqueue(pqisrc_softstate_t *);
void pqisrc_save_controller_info(struct pqisrc_softstate *);
-int smartpqi_shutdown(void *);
+
+/* Domain status conversion */
+int bsd_status_to_pqi_status(int );
#endif // _SMARTPQI_PROTOTYPES_H
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
index ce06224c67b6..da75a8f975d9 100644
--- a/sys/dev/smartpqi/smartpqi_queue.c
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,7 +31,8 @@
* Submit an admin IU to the adapter.
* Add interrupt support, if required
*/
-int pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
+int
+pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
gen_adm_req_iu_t *req, gen_adm_resp_iu_t *resp)
{
int ret = PQI_STATUS_SUCCESS;
@@ -70,6 +70,7 @@ int pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
if (tmo <= 0) {
DBG_ERR("Admin cmd timeout\n");
DBG_ERR("tmo : %d\n",tmo); \
+		/* TODO: read the PQI device status and error registers and report the failure */
ret = PQI_STATUS_TIMEOUT;
goto err_cmd;
}
@@ -77,10 +78,10 @@ int pqisrc_submit_admin_req(pqisrc_softstate_t *softs,
/* Copy the response */
memcpy(resp, ob_q->array_virt_addr + (ob_q->ci_local * ob_q->elem_size),
sizeof(gen_adm_resp_iu_t));
-
+
/* Update CI */
ob_q->ci_local = (ob_q->ci_local + 1 ) % ob_q->num_elem;
- PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
+ PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, LE_32(ob_q->ci_local));
/* Validate the response data */
@@ -105,10 +106,12 @@ err_out:
/*
* Get the administration queue config parameters.
*/
-void pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
+void
+pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
{
uint64_t val = 0;
+
val = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP));
/* pqi_cap = (struct pqi_dev_adminq_cap *)&val;*/
@@ -125,14 +128,15 @@ void pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs)
}
/*
- * Decide the no of elements in admin ib and ob queues.
+ * Decide the number of elements in the admin ib and ob queues.
*/
-void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
+void
+pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
{
/* Determine num elements in Admin IBQ */
softs->admin_ib_queue.num_elem = MIN(softs->admin_ib_queue.num_elem,
PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM);
-
+
/* Determine num elements in Admin OBQ */
softs->admin_ob_queue.num_elem = MIN(softs->admin_ob_queue.num_elem,
PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM);
@@ -141,7 +145,8 @@ void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs)
/*
* Allocate DMA memory for admin queue and initialize.
*/
-int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
+int
+pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
{
uint32_t ib_array_size = 0;
uint32_t ob_array_size = 0;
@@ -185,18 +190,18 @@ int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs)
/* IB CI */
softs->admin_ib_queue.ci_virt_addr =
- (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
+ (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr
+ ob_array_size);
softs->admin_ib_queue.ci_dma_addr =
- (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
+ (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr +
ob_array_size);
/* OB PI */
softs->admin_ob_queue.pi_virt_addr =
- (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
+ (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) +
PQI_ADDR_ALIGN_MASK_64 + 1);
softs->admin_ob_queue.pi_dma_addr =
- (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
+ (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) +
PQI_ADDR_ALIGN_MASK_64 + 1);
DBG_INIT("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n",
@@ -223,8 +228,8 @@ err_out:
/*
* Subroutine used to create (or) delete the admin queue requested.
*/
-int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs,
- uint32_t cmd)
+int
+pqisrc_create_delete_adminq(pqisrc_softstate_t *softs, uint32_t cmd)
{
int tmo = 0;
int ret = PQI_STATUS_SUCCESS;
@@ -232,7 +237,7 @@ int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs,
/* Create Admin Q pair writing to Admin Q config function reg */
PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG, LE_64(cmd));
-
+
if (cmd == PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR)
tmo = PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT;
else
@@ -243,16 +248,18 @@ int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs,
PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
if (tmo <= 0) {
DBG_ERR("Unable to create/delete admin queue pair\n");
+		/* TODO: read the PQI device status and error registers and report the failure */
ret = PQI_STATUS_TIMEOUT;
}
return ret;
-}
+}
/*
* Debug admin queue configuration params.
*/
-void pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
+void
+pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
{
DBG_INFO(" softs->admin_ib_queue.array_dma_addr : %p\n",
(void*)softs->admin_ib_queue.array_dma_addr);
@@ -279,7 +286,8 @@ void pqisrc_print_adminq_config(pqisrc_softstate_t *softs)
/*
* Function used to create an admin queue.
*/
-int pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
+int
+pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t admin_q_param = 0;
@@ -300,30 +308,31 @@ int pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
}
/* Write IB Q element array address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr,
PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ib_queue.array_dma_addr));
/* Write OB Q element array address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr,
PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ob_queue.array_dma_addr));
/* Write IB Q CI address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr,
PQI_ADMIN_IBQ_CI_ADDR, LE_64(softs->admin_ib_queue.ci_dma_addr));
/* Write OB Q PI address */
- PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
+ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr,
PQI_ADMIN_OBQ_PI_ADDR, LE_64(softs->admin_ob_queue.pi_dma_addr));
+
/* Write Admin Q params pqi-r200a table 36 */
admin_q_param = softs->admin_ib_queue.num_elem |
(softs->admin_ob_queue.num_elem << 8)|
PQI_ADMIN_QUEUE_MSIX_DISABLE;
-
- PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
+
+ PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param,
PQI_ADMINQ_PARAM, LE_32(admin_q_param));
-
+
/* Submit cmd to create Admin Q pair */
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR);
@@ -335,14 +344,14 @@ int pqisrc_create_admin_queue(pqisrc_softstate_t *softs)
/* Admin queue created, get ci,pi offset */
softs->admin_ib_queue.pi_register_offset =(PQISRC_PQI_REG_OFFSET +
PCI_MEM_GET64(softs, &softs->pqi_reg->admin_ibq_pi_offset, PQI_ADMIN_IBQ_PI_OFFSET));
-
- softs->admin_ib_queue.pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
+
+ softs->admin_ib_queue.pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
softs->admin_ib_queue.pi_register_offset);
-
+
softs->admin_ob_queue.ci_register_offset = (PQISRC_PQI_REG_OFFSET +
PCI_MEM_GET64(softs, &softs->pqi_reg->admin_obq_ci_offset, PQI_ADMIN_OBQ_CI_OFFSET));
- softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
+ softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
softs->admin_ob_queue.ci_register_offset);
os_strlcpy(softs->admin_ib_queue.lockname, "admin_ibqlock", LOCKNAME_SIZE);
@@ -373,8 +382,9 @@ err_out:
/*
* Subroutine used to delete an operational queue.
*/
-int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
- uint32_t q_id, boolean_t ibq)
+int
+pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
+ uint32_t q_id, boolean_t ibq)
{
int ret = PQI_STATUS_SUCCESS;
/* Firmware doesn't support this now */
@@ -383,6 +393,7 @@ int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
gen_adm_req_iu_t admin_req;
gen_adm_resp_iu_t admin_resp;
+
memset(&admin_req, 0, sizeof(admin_req));
memset(&admin_resp, 0, sizeof(admin_resp));
@@ -395,6 +406,7 @@ int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
else
admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_OQ;
+
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
DBG_FUNC("OUT\n");
@@ -405,7 +417,8 @@ int pqisrc_delete_op_queue(pqisrc_softstate_t *softs,
/*
* Function used to destroy the event queue.
*/
-void pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
+void
+pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
@@ -427,7 +440,8 @@ void pqisrc_destroy_event_queue(pqisrc_softstate_t *softs)
/*
* Function used to destroy operational ib queues.
*/
-void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
+void
+pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
ib_queue_t *op_ib_q = NULL;
@@ -442,15 +456,15 @@ void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true);
if (ret) {
DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id);
- }
+ }
op_ib_q->created = false;
}
-
+
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
}
-
+
/* OP AIO IB Q */
op_ib_q = &softs->op_aio_ib_q[i];
if (op_ib_q->created == true) {
@@ -460,7 +474,7 @@ void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
}
op_ib_q->created = false;
}
-
+
if(op_ib_q->lockcreated==true){
OS_UNINIT_PQILOCK(&op_ib_q->lock);
op_ib_q->lockcreated = false;
@@ -475,7 +489,8 @@ void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs)
/*
* Function used to destroy operational ob queues.
*/
-void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
+void
+pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
int i;
@@ -502,7 +517,8 @@ void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs)
/*
* Function used to destroy an admin queue.
*/
-int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
+int
+pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
@@ -510,8 +526,8 @@ int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
#if 0
ret = pqisrc_create_delete_adminq(softs,
PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR);
-#endif
- os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
+#endif
+ os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
DBG_FUNC("OUT\n");
return ret;
@@ -520,7 +536,8 @@ int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs)
/*
* Function used to change operational ib queue properties.
*/
-int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
+int
+pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
ib_queue_t *op_ib_q, uint32_t prop)
{
int ret = PQI_STATUS_SUCCESS;
@@ -545,7 +562,8 @@ int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs,
/*
* Function used to create an operational ob queue.
*/
-int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_obq(pqisrc_softstate_t *softs,
ob_queue_t *op_ob_q)
{
int ret = PQI_STATUS_SUCCESS;
@@ -558,7 +576,7 @@ int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
memset(&admin_resp, 0, sizeof(admin_resp));
admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_OQ;
- admin_req.req_type.create_op_oq.qid = op_ob_q->q_id;
+ admin_req.req_type.create_op_oq.qid = op_ob_q->q_id;
admin_req.req_type.create_op_oq.intr_msg_num = op_ob_q->intr_msg_num;
admin_req.req_type.create_op_oq.elem_arr_addr = op_ob_q->array_dma_addr;
admin_req.req_type.create_op_oq.ob_pi_addr = op_ob_q->pi_dma_addr;
@@ -570,7 +588,7 @@ int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
if( PQI_STATUS_SUCCESS == ret) {
- op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET +
+ op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET +
admin_resp.resp_type.create_op_oq.ci_offset);
op_ob_q->ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr +
op_ob_q->ci_register_offset);
@@ -578,7 +596,7 @@ int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
int i = 0;
DBG_WARN("Error Status Descriptors\n");
for(i = 0; i < 4;i++)
- DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
+ DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
}
DBG_FUNC("OUT ret : %d\n", ret);
@@ -589,7 +607,8 @@ int pqisrc_create_op_obq(pqisrc_softstate_t *softs,
/*
* Function used to create an operational ib queue.
*/
-int pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
@@ -611,26 +630,27 @@ int pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
if( PQI_STATUS_SUCCESS == ret) {
- op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET +
+ op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET +
admin_resp.resp_type.create_op_iq.pi_offset);
-
+
op_ib_q->pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr +
op_ib_q->pi_register_offset);
} else {
int i = 0;
DBG_WARN("Error Status Decsriptors\n");
for(i = 0; i < 4;i++)
- DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
+ DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
}
DBG_FUNC("OUT ret : %d\n", ret);
- return ret;
+ return ret;
}
/*
* subroutine used to create an operational ib queue for AIO.
*/
-int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_aio_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
@@ -649,7 +669,8 @@ int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs,
/*
* subroutine used to create an operational ib queue for RAID.
*/
-int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
+int
+pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
ib_queue_t *op_raid_ib_q)
{
int ret = PQI_STATUS_SUCCESS;
@@ -665,29 +686,31 @@ int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs,
/*
* Allocate and create an event queue to process supported events.
*/
-int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
+int
+pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
uint32_t num_elem;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
- uint32_t event_q_pi_dma_start_offset = 0;
+ uint64_t event_q_pi_dma_start_offset = 0;
uint32_t event_q_pi_virt_start_offset = 0;
char *event_q_pi_virt_start_addr = NULL;
ob_queue_t *event_q = NULL;
+
DBG_FUNC("IN\n");
- /*
- * Calculate memory requirements.
- * If event queue is shared for IO response, number of
- * elements in event queue depends on num elements in OP OB Q
- * also. Since event queue element size (32) is more than IO
+ /*
+ * Calculate memory requirements.
+ * If event queue is shared for IO response, number of
+ * elements in event queue depends on num elements in OP OB Q
+	 * also. Since the event queue element size (32) is larger than the IO
	 * response size, the event queue element size need not be checked
	 * separately in the queue size calculation.
*/
-#ifdef SHARE_EVENT_QUEUE_FOR_IO
+#ifdef SHARE_EVENT_QUEUE_FOR_IO
num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
#else
num_elem = PQISRC_NUM_EVENT_Q_ELEM;
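The sizing rule above can be read as a tiny helper: when the event queue is
shared with IO responses, its depth is capped by the operational OB queue
depth. A minimal sketch of that calculation (the helper itself is
hypothetical; PQISRC_NUM_EVENT_Q_ELEM and MIN mirror the driver's macros):

	/* Hypothetical helper mirroring the event queue sizing above. */
	static uint32_t
	event_q_num_elem(uint32_t num_elem_per_op_obq)
	{
	#ifdef SHARE_EVENT_QUEUE_FOR_IO
		/* Shared with IO responses: never deeper than the OP OB queue. */
		return MIN(num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM);
	#else
		return PQISRC_NUM_EVENT_Q_ELEM;
	#endif
	}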
@@ -745,20 +768,21 @@ err_out:
/*
* Allocate DMA memory and create operational ib queues.
- */
-int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
+ */
+int
+pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t ibq_size = 0;
- uint32_t ib_ci_dma_start_offset = 0;
+ uint64_t ib_ci_dma_start_offset = 0;
char *ib_ci_virt_start_addr = NULL;
- uint32_t ib_ci_virt_start_offset = 0;
+ uint32_t ib_ci_virt_start_offset = 0;
uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID;
ib_queue_t *op_ib_q = NULL;
- uint32_t num_op_ibq = softs->num_op_raid_ibq +
+ uint32_t num_op_ibq = softs->num_op_raid_ibq +
softs->num_op_aio_ibq;
int i = 0;
@@ -767,7 +791,7 @@ int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
/* Calculate memory requirements */
ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size;
alloc_size = num_op_ibq * ibq_size;
- /* CI indexes starts after Queue element array */
+ /* CI indexes starts after Queue element array */
ib_ci_dma_start_offset = alloc_size;
ib_ci_virt_start_offset = alloc_size;
alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/
@@ -798,21 +822,24 @@ int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
- snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
- if(ret){
- DBG_ERR("raid_ibqlock %d init failed\n", i);
- op_ib_q->lockcreated = false;
- goto err_lock;
+ if(ret){
+ /* TODO: error handling */
+ DBG_ERR("raid_ibqlock %d init failed\n", i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
}
- op_ib_q->lockcreated = true;
- op_ib_q->num_elem = softs->num_elem_per_op_ibq;
+ op_ib_q->lockcreated = true;
+
+ op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(2 * i * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(2 * i * sizeof(uint32_t)));
- ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+ ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+
ret = pqisrc_create_op_raid_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n",
@@ -828,21 +855,24 @@ int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs)
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr);
op_ib_q->q_id = ibq_id++;
- snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
+ snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i);
ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname);
- if(ret){
- DBG_ERR("aio_ibqlock %d init failed\n", i);
- op_ib_q->lockcreated = false;
- goto err_lock;
- }
- op_ib_q->lockcreated = true;
- op_ib_q->num_elem = softs->num_elem_per_op_ibq;
+ if(ret){
+ /* TODO: error handling */
+ DBG_ERR("aio_ibqlock %d init failed\n", i);
+ op_ib_q->lockcreated = false;
+ goto err_lock;
+ }
+ op_ib_q->lockcreated = true;
+
+ op_ib_q->num_elem = softs->num_elem_per_op_ibq;
op_ib_q->elem_size = softs->ibq_elem_size;
op_ib_q->ci_dma_addr = ib_ci_dma_start_offset +
(((2 * i) + 1) * sizeof(uint32_t));
op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr +
(((2 * i) + 1) * sizeof(uint32_t)));
ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4));
+
ret = pqisrc_create_op_aio_ibq(softs, op_ib_q);
if (ret) {
DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id);
@@ -867,15 +897,16 @@ err_out:
/*
* Allocate DMA memory and create operational ob queues.
- */
-int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
+ */
+int
+pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t alloc_size = 0;
char *virt_addr = NULL;
dma_addr_t dma_addr = 0;
uint32_t obq_size = 0;
- uint32_t ob_pi_dma_start_offset = 0;
+ uint64_t ob_pi_dma_start_offset = 0;
uint32_t ob_pi_virt_start_offset = 0;
char *ob_pi_virt_start_addr = NULL;
uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID;
@@ -885,17 +916,17 @@ int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
DBG_FUNC("IN\n");
- /*
- * OB Q element array should be 64 byte aligned.
- * So the number of elements in OB Q should be multiple
- * of 4, so that OB Queue element size (16) * num elements
+ /*
+ * OB Q element array should be 64 byte aligned.
+ * So the number of elements in OB Q should be multiple
+ * of 4, so that OB Queue element size (16) * num elements
* will be multiple of 64.
*/
ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4);
obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size;
alloc_size += num_op_obq * obq_size;
- /* PI indexes starts after Queue element array */
+ /* PI indexes starts after Queue element array */
ob_pi_dma_start_offset = alloc_size;
ob_pi_virt_start_offset = alloc_size;
alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/
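To make the rounding concrete: with 16-byte OB queue elements, rounding the
element count up to a multiple of 4 keeps each per-queue element array a
multiple of 64 bytes. A small sketch of the arithmetic (ALIGN_BOUNDARY is
assumed to round its first argument up in place, as the driver macro does):

	/* Round count up to a multiple of 4 so count * 16 is a multiple of 64. */
	uint32_t count = 14;                 /* example element count */
	count = (count + 3) & ~(uint32_t)3;  /* -> 16, what ALIGN_BOUNDARY(count, 4) yields */
	uint32_t obq_bytes = count * 16;     /* 256 bytes: 64-byte-aligned array size */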
@@ -919,15 +950,15 @@ int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq);
- for (i = 0; i < softs->num_op_obq; i++) {
+ for (i = 0; i < softs->num_op_obq; i++) {
op_ob_q = &softs->op_ob_q[i];
ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64));
FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr);
op_ob_q->q_id = obq_id++;
if(softs->share_opq_and_eventq == true)
- op_ob_q->intr_msg_num = i;
+ op_ob_q->intr_msg_num = i;
else
- op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
+ op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */
op_ob_q->num_elem = softs->num_elem_per_op_obq;
op_ob_q->elem_size = softs->obq_elem_size;
op_ob_q->pi_dma_addr = ob_pi_dma_start_offset +
@@ -935,7 +966,7 @@ int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs)
op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr +
(i * sizeof(uint32_t)));
ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4));
-
+
ret = pqisrc_create_op_obq(softs,op_ob_q);
if (ret) {
DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id);
@@ -958,13 +989,14 @@ err_out:
/*
* Function used to create operational queues for the adapter.
- */
-int pqisrc_create_op_queues(pqisrc_softstate_t *softs)
+ */
+int
+pqisrc_create_op_queues(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
DBG_FUNC("IN\n");
-
+
/* Create Operational IB queues */
ret = pqisrc_alloc_and_create_ib_queues(softs);
if (ret)
@@ -977,12 +1009,12 @@ int pqisrc_create_op_queues(pqisrc_softstate_t *softs)
/* Create Event queue */
ret = pqisrc_alloc_and_create_event_queue(softs);
if (ret)
- goto err_out_eventq;
+ goto err_out_eventq;
DBG_FUNC("OUT\n");
return ret;
err_out_eventq:
- pqisrc_destroy_op_ob_queues(softs);
+ pqisrc_destroy_op_ob_queues(softs);
err_out_obq:
pqisrc_destroy_op_ib_queues(softs);
err_out:
diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c
index bf6e43e9dd4e..222852fbe088 100644
--- a/sys/dev/smartpqi/smartpqi_request.c
+++ b/sys/dev/smartpqi/smartpqi_request.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +27,26 @@
#include "smartpqi_includes.h"
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
+#define HPSA_RAID_MAX HPSA_RAID_ADM
+#define HPSA_RAID_UNKNOWN 0xff
+
#define SG_FLAG_LAST 0x40000000
#define SG_FLAG_CHAIN 0x80000000
/* Subroutine to find out embedded sgl count in IU */
-static inline
-uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
+static inline uint32_t
+pqisrc_embedded_sgl_count(uint32_t elem_alloted)
{
uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
DBG_FUNC(" IN ");
@@ -51,15 +64,15 @@ uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
}
/* Subroutine to find out contiguous free elem in IU */
-static inline
-uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
+static inline uint32_t
+pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
uint32_t contiguous_free_elem = 0;
DBG_FUNC(" IN ");
if(pi >= ci) {
- contiguous_free_elem = (elem_in_q - pi);
+ contiguous_free_elem = (elem_in_q - pi);
if(ci == 0)
contiguous_free_elem -= 1;
} else {
@@ -80,7 +93,7 @@ pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
DBG_FUNC(" IN ");
DBG_IO("SGL_Count :%d",SG_Count);
/********
- If SG_Count greater than max sg per IU i.e 4 or 68
+	If SG_Count is greater than the max sg per IU, i.e. 4 or 68
	(4 without spanning, 68 with spanning), chaining is required,
	OR if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU;
	in these two cases one element is enough.
@@ -97,13 +110,13 @@ pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
}
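The free-element computation above treats the IB queue as a ring: when the
producer index is at or ahead of the consumer index, only the slots up to the
end of the array are contiguous, and one slot stays reserved when ci == 0 so a
full ring is never mistaken for an empty one. A worked sketch under those
assumptions (the else branch is not shown in this hunk; ci - pi - 1 is the
standard ring formula for it):

	/* Sketch of pqisrc_contiguous_free_elem() on a 16-element ring. */
	uint32_t pi = 14, ci = 3, elem_in_q = 16;
	uint32_t free_elem;
	if (pi >= ci) {
		free_elem = elem_in_q - pi;   /* slots 14..15 -> 2 contiguous */
		if (ci == 0)
			free_elem -= 1;       /* keep one slot unused */
	} else {
		free_elem = ci - pi - 1;      /* gap between pi and ci */
	}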
/* Subroutine to build SG list for the IU submission*/
-static
-boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
+static boolean_t
+pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
uint32_t num_elem_alloted)
{
uint32_t i;
uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
- sgt_t *sgt = sg_array;
+ sgt_t *sgt = sg_array;
sgt_t *sg_chain = NULL;
boolean_t partial = false;
@@ -120,7 +133,7 @@ boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
sgt->len= OS_GET_IO_SG_LEN(rcb,i);
sgt->flags= 0;
}
-
+
sg_array[num_sg - 1].flags = SG_FLAG_LAST;
} else {
/**
@@ -130,17 +143,18 @@ boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
sgt->addr = rcb->sg_chain_dma;
sgt->len = num_sg * sizeof(sgt_t);
sgt->flags = SG_FLAG_CHAIN;
-
+
sgt = sg_chain;
for (i = 0; i < num_sg; i++, sgt++) {
sgt->addr = OS_GET_IO_SG_ADDR(rcb,i);
sgt->len = OS_GET_IO_SG_LEN(rcb,i);
sgt->flags = 0;
}
-
- sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
+
+ sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
num_sg = 1;
partial = true;
+
}
out:
iu_hdr->iu_length = num_sg * sizeof(sgt_t);
@@ -150,8 +164,8 @@ out:
}
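When the SG count will not fit in the IU, the code above plants a single chain
descriptor in the IU that points at the preallocated sg_chain buffer, and the
last entry of whichever array actually holds the data gets SG_FLAG_LAST. A
minimal sketch of the chain descriptor setup (field names follow the sgt_t
used here):

	/* One chain descriptor in the IU pointing at the external SG list. */
	sgt_t chain_desc;
	chain_desc.addr  = rcb->sg_chain_dma;       /* DMA address of the external list */
	chain_desc.len   = num_sg * sizeof(sgt_t);  /* bytes of chained descriptors */
	chain_desc.flags = SG_FLAG_CHAIN;           /* tells firmware to follow the chain */
	/* The external list is then filled from OS_GET_IO_SG_ADDR()/OS_GET_IO_SG_LEN(),
	 * and its last entry is marked with SG_FLAG_LAST. */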
/*Subroutine used to Build the RAID request */
-static void
-pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+static void
+pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
DBG_FUNC(" IN ");
@@ -164,8 +178,8 @@ pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
raid_req->request_id = rcb->tag;
raid_req->nexus_id = 0;
raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
- memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
- sizeof(raid_req->lun_number));
+ memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
+ sizeof(raid_req->lun_number));
raid_req->protocol_spec = 0;
raid_req->data_direction = rcb->data_dir;
raid_req->reserved1 = 0;
@@ -178,7 +192,7 @@ pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
raid_req->reserved4 = 0;
raid_req->reserved5 = 0;
- /* As cdb and additional_cdb_bytes are contiguous,
+ /* As cdb and additional_cdb_bytes are contiguous,
update them in a single statement */
memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
#if 0
@@ -216,9 +230,9 @@ pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
/* Frame SGL Descriptor */
raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
- &raid_req->header, num_elem_alloted);
+ &raid_req->header, num_elem_alloted);
- raid_req->header.iu_length +=
+ raid_req->header.iu_length +=
offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);
#if 0
@@ -232,9 +246,9 @@ pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags);
-#endif
- rcb->success_cmp_callback = pqisrc_process_io_response_success;
- rcb->error_cmp_callback = pqisrc_process_raid_response_error;
+#endif
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_raid_response_error;
rcb->resp_qid = raid_req->response_queue_id;
DBG_FUNC(" OUT ");
@@ -243,7 +257,7 @@ pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
/*Subroutine used to Build the AIO request */
static void
-pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
+pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
{
DBG_FUNC(" IN ");
@@ -262,11 +276,12 @@ pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
aio_req->mem_type = 0;
aio_req->fence = 0;
aio_req->res2 = 0;
- aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
+ aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
aio_req->cmd_prio = 0;
aio_req->res3 = 0;
aio_req->err_idx = aio_req->req_id;
aio_req->cdb_len = rcb->cmdlen;
+
if(rcb->cmdlen > sizeof(aio_req->cdb))
rcb->cmdlen = sizeof(aio_req->cdb);
memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
@@ -288,7 +303,7 @@ pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
aio_req->encrypt_key_index = 0;
aio_req->encrypt_twk_high = 0;
aio_req->encrypt_twk_low = 0;
- }
+ }
/* Frame SGL Descriptor */
aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
@@ -298,7 +313,7 @@ pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg);
- aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
+ aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
sizeof(iu_header_t);
#if 0
DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
@@ -315,8 +330,8 @@ pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags);
#endif
- rcb->success_cmp_callback = pqisrc_process_io_response_success;
- rcb->error_cmp_callback = pqisrc_process_aio_response_error;
+ rcb->success_cmp_callback = pqisrc_process_io_response_success;
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
rcb->resp_qid = aio_req->response_queue_id;
DBG_FUNC(" OUT ");
@@ -324,14 +339,15 @@ pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
}
/*Function used to build and send RAID/AIO */
-int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
+int
+pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
{
ib_queue_t *ib_q_array = softs->op_aio_ib_q;
ib_queue_t *ib_q = NULL;
- char *ib_iu = NULL;
+ char *ib_iu = NULL;
IO_PATH_T io_path = AIO_PATH;
- uint32_t TraverseCount = 0;
- int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
+ uint32_t TraverseCount = 0;
+ int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
int qindex = first_qindex;
uint32_t num_op_ib_q = softs->num_op_aio_ibq;
uint32_t num_elem_needed;
@@ -341,29 +357,35 @@ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
DBG_FUNC(" IN ");
- rcb->cdbp = OS_GET_CDBP(rcb);
-
- if(IS_AIO_PATH(devp)) {
- /** IO for Physical Drive **/
- /** Send in AIO PATH**/
- rcb->ioaccel_handle = devp->ioaccel_handle;
- } else {
- int ret = PQI_STATUS_FAILURE;
- /** IO for RAID Volume **/
- if (devp->offload_enabled) {
- /** ByPass IO ,Send in AIO PATH **/
- ret = pqisrc_send_scsi_cmd_raidbypass(softs,
- devp, rcb, raidbypass_cdb);
- }
-
- if (PQI_STATUS_FAILURE == ret) {
- /** Send in RAID PATH **/
- io_path = RAID_PATH;
- num_op_ib_q = softs->num_op_raid_ibq;
- ib_q_array = softs->op_raid_ib_q;
+ if(!rcb->aio_retry) {
+ rcb->cdbp = OS_GET_CDBP(rcb);
+ if(IS_AIO_PATH(devp)) {
+ /** IO for Physical Drive **/
+ /** Send in AIO PATH**/
+ rcb->ioaccel_handle = devp->ioaccel_handle;
} else {
- rcb->cdbp = raidbypass_cdb;
+ int ret = PQI_STATUS_FAILURE;
+ /** IO for RAID Volume **/
+ if (devp->offload_enabled) {
+ /** ByPass IO ,Send in AIO PATH **/
+ ret = pqisrc_send_scsi_cmd_raidbypass(softs,
+ devp, rcb, raidbypass_cdb);
+ }
+ if (PQI_STATUS_FAILURE == ret) {
+ /** Send in RAID PATH **/
+ io_path = RAID_PATH;
+ num_op_ib_q = softs->num_op_raid_ibq;
+ ib_q_array = softs->op_raid_ib_q;
+ } else {
+ rcb->cdbp = raidbypass_cdb;
+ }
}
+ } else {
+ /* Retrying failed AIO IO */
+ io_path = RAID_PATH;
+ rcb->cdbp = OS_GET_CDBP(rcb);
+ num_op_ib_q = softs->num_op_raid_ibq;
+ ib_q_array = softs->op_raid_ib_q;
}
num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
@@ -372,10 +394,10 @@ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
do {
uint32_t num_elem_available;
ib_q = (ib_q_array + qindex);
- PQI_LOCK(&ib_q->lock);
+ PQI_LOCK(&ib_q->lock);
num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
*(ib_q->ci_virt_addr), ib_q->num_elem);
-
+
DBG_IO("num_elem_avialable :%d\n",num_elem_available);
if(num_elem_available >= num_elem_needed) {
num_elem_alloted = num_elem_needed;
@@ -383,7 +405,7 @@ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
}
DBG_IO("Current queue is busy! Hop to next queue\n");
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
qindex = (qindex + 1) % num_op_ib_q;
if(qindex == first_qindex) {
if (num_elem_needed == 1)
@@ -397,7 +419,9 @@ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
if (num_elem_alloted == 0) {
DBG_WARN("OUT: IB Queues were full\n");
return PQI_STATUS_QFULL;
- }
+ }
+
+ pqisrc_increment_device_active_io(softs,devp);
/* Get IB Queue Slot address to build IU */
ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);
@@ -413,6 +437,8 @@ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
}
rcb->req_pending = true;
+ rcb->req_q = ib_q;
+ rcb->path = io_path;
/* Update the local PI */
ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;
@@ -423,15 +449,15 @@ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb)
/* Inform the fw about the new IU */
PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);
- PQI_UNLOCK(&ib_q->lock);
+ PQI_UNLOCK(&ib_q->lock);
DBG_FUNC(" OUT ");
return PQI_STATUS_SUCCESS;
}
/* Subroutine used to set encryption info as part of RAID bypass IO*/
-static inline void pqisrc_set_enc_info(
- struct pqi_enc_info *enc_info, struct raid_map *raid_map,
- uint64_t first_block)
+static inline void
+pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
+ struct raid_map *raid_map, uint64_t first_block)
{
uint32_t volume_blk_size;
@@ -450,23 +476,12 @@ static inline void pqisrc_set_enc_info(
enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
}
-/*
- * Attempt to perform offload RAID mapping for a logical volume I/O.
- */
-
-#define HPSA_RAID_0 0
-#define HPSA_RAID_4 1
-#define HPSA_RAID_1 2 /* also used for RAID 10 */
-#define HPSA_RAID_5 3 /* also used for RAID 50 */
-#define HPSA_RAID_51 4
-#define HPSA_RAID_6 5 /* also used for RAID 60 */
-#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
-#define HPSA_RAID_MAX HPSA_RAID_ADM
-#define HPSA_RAID_UNKNOWN 0xff
-
/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/
-int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
- uint32_t *blk_cnt) {
+int
+check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
+ uint32_t *blk_cnt)
+{
+
switch (cdb[0]) {
case SCMD_WRITE_6:
*is_write = true;
@@ -502,10 +517,58 @@ int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
return PQI_STATUS_SUCCESS;
}
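For the 6-byte opcodes handled first in this switch, the LBA is 21 bits spread
across bytes 1-3 and the transfer length is byte 4, with 0 meaning 256 blocks.
A sketch of that decode, consistent with the SBC CDB layout (the driver's
exact extraction sits in the elided switch body):

	/* Decode a 6-byte WRITE CDB (SBC layout); values are illustrative. */
	uint8_t cdb6[6] = { 0x0A, 0x01, 0x23, 0x45, 0x08, 0x00 }; /* WRITE(6) */
	uint64_t fst_blk = ((uint64_t)(cdb6[1] & 0x1F) << 16) |
	                   ((uint64_t)cdb6[2] << 8) | cdb6[3];
	uint32_t blk_cnt = cdb6[4] ? cdb6[4] : 256;  /* 0 means 256 blocks */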
+/* print any arbitrary buffer of length total_len */
+void
+pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
+ uint32_t total_len, uint32_t flags)
+{
+#define LINE_BUF_LEN 60
+#define INDEX_PER_LINE 16
+ uint32_t buf_consumed = 0;
+ int ii;
+ char line_buf[LINE_BUF_LEN];
+ int line_len; /* written length per line */
+ uint8_t this_char;
+
+ if (user_buf == NULL)
+ return;
+
+ /* Print index columns */
+ if (flags & PRINT_FLAG_HDR_COLUMN)
+ {
+ for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++)
+ {
+ line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
+ if ((line_len + 4) >= LINE_BUF_LEN)
+ break;
+ }
+ DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf);
+ }
+
+	/* Print the buffer contents, sixteen bytes per line */
+ while(buf_consumed < total_len)
+ {
+ memset(line_buf, 0, LINE_BUF_LEN);
+
+ for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++)
+ {
+ this_char = *((char*)(user_buf) + buf_consumed);
+ line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char);
+
+ buf_consumed++;
+ if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
+ break;
+ }
+ DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf);
+ }
+}
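A typical call site for the new dump helper might look like the following
(PRINT_FLAG_HDR_COLUMN is the flag tested above; the call itself is
illustrative, not taken from this change):

	/* Hex-dump a CDB with an index header row. */
	pqisrc_print_buffer(softs, "cdb", rcb->cdbp, rcb->cmdlen,
	    PRINT_FLAG_HDR_COLUMN);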
+
+
/*
* Function used to build and send RAID bypass request to the adapter
*/
-int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
+int
+pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
{
struct raid_map *raid_map;
@@ -539,14 +602,14 @@ int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
for(i = 0; i < rcb->cmdlen ; i++)
DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]);
- if(check_for_scsi_opcode(cdb, &is_write,
+ if(check_for_scsi_opcode(cdb, &is_write,
&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
return PQI_STATUS_FAILURE;
/* Check for write to non-RAID-0. */
if (is_write && device->raid_level != SA_RAID_0)
return PQI_STATUS_FAILURE;
- if(blk_cnt == 0)
+ if(blk_cnt == 0)
return PQI_STATUS_FAILURE;
lst_blk = fst_blk + blk_cnt - 1;
@@ -564,7 +627,8 @@ int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
/* Calculate stripe information for the request. */
blks_per_row = data_disks_per_row * strip_sz;
if (!blks_per_row)
- return PQI_STATUS_FAILURE;
+		return PQI_STATUS_FAILURE; /* Send the IO down the raid path itself, not AIO or raid bypass */
+
/* use __udivdi3 ? */
fst_row = fst_blk / blks_per_row;
lst_row = lst_blk / blks_per_row;
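The row computation divides the volume's logical block range into rows of
blks_per_row = data_disks_per_row * strip_sz blocks; a request stays eligible
for bypass only while it maps onto a single row/strip. A small worked example
under those definitions (values are illustrative):

	/* Stripe math for a 3-data-disk volume with 128-block strips. */
	uint64_t strip_sz = 128, data_disks_per_row = 3;
	uint64_t blks_per_row = data_disks_per_row * strip_sz;  /* 384 blocks/row */
	uint64_t fst_blk = 500, lst_blk = 520;
	uint64_t fst_row = fst_blk / blks_per_row;  /* row 1 */
	uint64_t lst_row = lst_blk / blks_per_row;  /* row 1: same row, still eligible */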
@@ -689,9 +753,6 @@ int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
(map_row * total_disks_per_row) + fst_col;
}
- if (map_idx >= RAID_MAP_MAX_ENTRIES)
- return PQI_STATUS_FAILURE;
-
rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
fst_row * strip_sz +
@@ -737,46 +798,59 @@ int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
rcb->cmdlen = cdb_length;
-
+
DBG_FUNC("OUT");
return PQI_STATUS_SUCCESS;
}
-/* Function used to submit a TMF to the adater */
-int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
- rcb_t *rcb, int req_id, int tmf_type)
+/* Function used to submit an AIO TMF to the adapter
+ * DEVICE_RESET is not supported.
+ */
+static int
+pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
int rval = PQI_STATUS_SUCCESS;
- pqi_tmf_req_t tmf_req;
+ pqi_aio_tmf_req_t tmf_req;
+ ib_queue_t *op_ib_q = NULL;
- memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));
+ memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));
DBG_FUNC("IN");
- tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
+ tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
tmf_req.req_id = rcb->tag;
-
- memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ tmf_req.error_idx = rcb->tag;
+ tmf_req.nexus = devp->ioaccel_handle;
+ //memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
tmf_req.tmf = tmf_type;
- tmf_req.req_id_to_manage = req_id;
tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
- tmf_req.obq_id_to_manage = rcb->resp_qid;
+ op_ib_q = &softs->op_aio_ib_q[0];
+
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
+ tmf_req.req_id_to_manage = rcb_to_manage->tag;
+ tmf_req.nexus = rcb_to_manage->ioaccel_handle;
+ }
+
+ DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage);
+ DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id);
rcb->req_pending = true;
+	/* A timed-out TMF response lands here */
+ rcb->error_cmp_callback = pqisrc_process_aio_response_error;
- rval = pqisrc_submit_cmnd(softs,
- &softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);
+ rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
if (rval != PQI_STATUS_SUCCESS) {
DBG_ERR("Unable to submit command rval=%d\n", rval);
return rval;
}
- rval = pqisrc_wait_on_condition(softs, rcb);
+ rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
if (rval != PQI_STATUS_SUCCESS){
DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
- rcb->status = REQUEST_FAILED;
+ rcb->status = rval;
}
if (rcb->status != REQUEST_SUCCESS) {
@@ -788,3 +862,186 @@ int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
DBG_FUNC("OUT");
return rval;
}
+
+/* Function used to submit a Raid TMF to the adapter */
+static int
+pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
+{
+ int rval = PQI_STATUS_SUCCESS;
+ pqi_raid_tmf_req_t tmf_req;
+ ib_queue_t *op_ib_q = NULL;
+
+ memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));
+
+ DBG_FUNC("IN");
+
+ tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
+ tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
+ tmf_req.req_id = rcb->tag;
+
+ memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
+ tmf_req.tmf = tmf_type;
+ tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
+
+ /* Decide the queue where the tmf request should be submitted */
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
+ tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
+ tmf_req.req_id_to_manage = rcb_to_manage->tag;
+ }
+
+ if (softs->timeout_in_tmf &&
+ tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
+ /* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
+ tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
+ /* if OS tmf timeout is 0, set minimum value for timeout */
+ if (!tmf_req.timeout_in_sec)
+ tmf_req.timeout_in_sec = 1;
+ }
+
+ op_ib_q = &softs->op_raid_ib_q[0];
+ rcb->req_pending = true;
+	/* A timed-out TMF response lands here */
+ rcb->error_cmp_callback = pqisrc_process_raid_response_error;
+
+ rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command rval=%d\n", rval);
+ return rval;
+ }
+
+ rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
+ if (rval != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
+ rcb->status = rval;
+ }
+
+ if (rcb->status != REQUEST_SUCCESS) {
+ DBG_NOTE("Task Management failed tmf_type:%d "
+ "stat:0x%x\n", tmf_type, rcb->status);
+ rval = PQI_STATUS_FAILURE;
+ }
+
+ DBG_FUNC("OUT");
+ return rval;
+}
+
+int
+pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
+ rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
+{
+ int ret = PQI_STATUS_SUCCESS;
+
+ DBG_FUNC("IN");
+
+ if(!devp->is_physical_device) {
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
+ if(rcb_to_manage->path == AIO_PATH) {
+ if(devp->offload_enabled)
+ ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ }
+ else {
+ DBG_INFO("TASK ABORT not supported in raid\n");
+ ret = PQI_STATUS_FAILURE;
+ }
+ }
+ else {
+ ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ }
+ } else {
+ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
+ ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ else
+ ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
+ }
+
+ DBG_FUNC("IN");
+
+ return ret;
+}
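The dispatcher above reduces to a small rule: task aborts are sent as AIO TMFs
when the victim went down the AIO path (and always for physical devices),
RAID-path task aborts are rejected, and every other TMF goes out as a RAID
TMF. An illustrative LUN reset call (passing NULL for rcb_to_manage is an
assumption for TMFs with no victim request):

	/* LUN reset: no request to abort, so no rcb_to_manage. */
	int ret = pqisrc_send_tmf(softs, devp, rcb, NULL,
	    SOP_TASK_MANAGEMENT_LUN_RESET);
	if (ret != PQI_STATUS_SUCCESS)
		DBG_ERR("LUN reset failed on %d:%d:%d\n",
		    devp->bus, devp->target, devp->lun);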
+
+/*
+ * Function used to build and send the vendor general request
+ * Used for configuring PQI feature bits between firmware and driver
+ */
+int
+pqisrc_build_send_vendor_request(
+ pqisrc_softstate_t *softs,
+ pqi_vendor_general_request_t *request,
+ raid_path_error_info_elem_t *error_info)
+{
+ int ret = PQI_STATUS_SUCCESS;
+ ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
+ ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
+
+ rcb_t *rcb = NULL;
+
+ uint16_t request_id = 0;
+
+ /* Get the tag */
+ request_id = pqisrc_get_tag(&softs->taglist);
+ if (INVALID_ELEM == request_id) {
+ DBG_ERR("Tag not available\n");
+ ret = PQI_STATUS_FAILURE;
+ goto err_notag;
+ }
+
+	request->request_id = request_id;
+	request->response_queue_id = ob_q->q_id;
+
+ rcb = &softs->rcb[request_id];
+
+ rcb->req_pending = true;
+ rcb->tag = request_id;
+
+ ret = pqisrc_submit_cmnd(softs, op_ib_q, request);
+
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Unable to submit command\n");
+ goto err_out;
+ }
+
+ ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
+ if (ret != PQI_STATUS_SUCCESS) {
+ DBG_ERR("Management request timed out!\n");
+ goto err_out;
+ }
+
+ ret = rcb->status;
+ if (ret) {
+ ret = PQI_STATUS_FAILURE;
+ if(error_info) {
+ // TODO: config table err handling.
+ }
+ } else {
+ if(error_info) {
+ ret = PQI_STATUS_SUCCESS;
+ memset(error_info, 0, sizeof(*error_info));
+ }
+ }
+
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
+ DBG_FUNC("OUT\n");
+ return ret;
+
+err_out:
+ DBG_ERR("Vender general request submission failed.\n");
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
+err_notag:
+ DBG_FUNC("FAILED \n");
+ return ret;
+}
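A caller configuring PQI feature bits would fill a pqi_vendor_general_request_t
and hand it to the helper above, which assigns the request_id and
response_queue_id itself. A hedged sketch (the IU type name and function code
below are placeholders, not values confirmed by this change):

	/* Hypothetical caller: push a config-table section update to firmware. */
	pqi_vendor_general_request_t req;
	memset(&req, 0, sizeof(req));
	req.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;  /* assumed IU type */
	req.header.iu_length = sizeof(req) - sizeof(iu_header_t);
	req.function_code = 0;  /* placeholder function code */
	req.data.config_table_update.first_section = 0;
	req.data.config_table_update.last_section = 0;
	if (pqisrc_build_send_vendor_request(softs, &req, NULL) != PQI_STATUS_SUCCESS)
		DBG_ERR("vendor general request failed\n");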
+
+/* return the path as ASCII-string */
+char *
+io_path_to_ascii(IO_PATH_T path)
+{
+ switch (path)
+ {
+ case AIO_PATH: return "Aio";
+ case RAID_PATH: return "Raid";
+ default: return "Unknown";
+ }
+}
diff --git a/sys/dev/smartpqi/smartpqi_response.c b/sys/dev/smartpqi/smartpqi_response.c
index c243a3b8230a..e89b65db01e2 100644
--- a/sys/dev/smartpqi/smartpqi_response.c
+++ b/sys/dev/smartpqi/smartpqi_response.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,8 +30,8 @@
/*
* Process internal RAID response in the case of success.
*/
-void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,
- rcb_t *rcb)
+void
+pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,rcb_t *rcb)
{
DBG_FUNC("IN");
@@ -45,7 +44,8 @@ void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,
/*
* Process internal RAID response in the case of failure.
*/
-void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
+void
+pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
raid_path_error_info_elem_t error_info;
@@ -54,18 +54,25 @@ void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) +
(err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH);
- rcb->status = REQUEST_SUCCESS;
+
memcpy(&error_info, rcb->error_info, sizeof(error_info));
DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n",
error_info.status, error_info.data_in_result, error_info.data_out_result);
- if (error_info.status != 0)
- rcb->status = REQUEST_FAILED;
- if (error_info.data_in_result != PQI_RAID_DATA_IN_OUT_GOOD)
- rcb->status = REQUEST_FAILED;
- if (error_info.data_out_result != PQI_RAID_DATA_IN_OUT_GOOD)
- rcb->status = REQUEST_FAILED;
+ rcb->status = REQUEST_FAILED;
+
+ switch (error_info.data_out_result) {
+ case PQI_RAID_DATA_IN_OUT_GOOD:
+ if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD)
+ rcb->status = REQUEST_SUCCESS;
+ break;
+ case PQI_RAID_DATA_IN_OUT_UNDERFLOW:
+ if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD ||
+ error_info.status == PQI_RAID_STATUS_CHECK_CONDITION)
+ rcb->status = REQUEST_SUCCESS;
+ break;
+ }
rcb->req_pending = false;
@@ -75,8 +82,8 @@ void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs,
/*
* Process the AIO/RAID IO in the case of success.
*/
-void pqisrc_process_io_response_success(pqisrc_softstate_t *softs,
- rcb_t *rcb)
+void
+pqisrc_process_io_response_success(pqisrc_softstate_t *softs, rcb_t *rcb)
{
DBG_FUNC("IN");
@@ -85,10 +92,64 @@ void pqisrc_process_io_response_success(pqisrc_softstate_t *softs,
DBG_FUNC("OUT");
}
+static void
+pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc, uint8_t *ascq)
+{
+ if (sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_70 ||
+ sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_71)
+ {
+ sense_data_fixed_t *fixed = &sense_data->fixed_format;
+
+ *key = fixed->sense_key;
+ *asc = fixed->sense_code;
+ *ascq = fixed->sense_qual;
+ }
+ else if (sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_72 ||
+ sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_73)
+ {
+ sense_data_descriptor_t *desc = &sense_data->descriptor_format;
+
+ *key = desc->sense_key;
+ *asc = desc->sense_code;
+ *ascq = desc->sense_qual;
+ }
+ else
+ {
+ *key = 0xFF;
+ *asc = 0xFF;
+ *ascq = 0xFF;
+ }
+}
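The extractor above keys off the response code byte: 0x70/0x71 select the
fixed layout, 0x72/0x73 the descriptor layout, and anything else yields 0xFF
sentinels. A small sketch feeding it a fixed-format CHECK CONDITION (the
key/ASC/ASCQ values are an illustrative ILLEGAL REQUEST):

	/* Fixed-format sense: key 0x5, ASC 0x20, ASCQ 0x00. */
	sense_data_u_t sd;
	uint8_t key, asc, ascq;
	memset(&sd, 0, sizeof(sd));
	sd.fixed_format.response_code = SCSI_SENSE_RESPONSE_70;
	sd.fixed_format.sense_key  = 0x5;
	sd.fixed_format.sense_code = 0x20;
	sd.fixed_format.sense_qual = 0x00;
	pqisrc_extract_sense_data(&sd, &key, &asc, &ascq);  /* -> 0x5 / 0x20 / 0x00 */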
+
+static void
+pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
+{
+ uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF;
+ char *path = io_path_to_ascii(rcb->path);
+ uint8_t key, asc, ascq;
+ pqisrc_extract_sense_data(sense_data, &key, &asc, &ascq);
+
+ DBG_NOTE("[ERR INFO] BTL: %d:%d:%d op=0x%x path=%s K:C:Q: %x:%x:%x\n",
+ rcb->dvp->bus, rcb->dvp->target, rcb->dvp->lun,
+ opcode, path, key, asc, ascq);
+}
+
+void
+pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data)
+{
+ pqisrc_print_buffer(softs, "sense data", sense_data, 32, 0);
+
+ pqisrc_show_sense_data_simple(softs, rcb, sense_data);
+
+ /* add more detail here as needed */
+}
+
+
/*
* Process the error info for AIO in the case of failure.
*/
-void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
+void
+pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
aio_path_error_info_elem_t *err_info = NULL;
@@ -96,7 +157,7 @@ void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
DBG_FUNC("IN");
err_info = (aio_path_error_info_elem_t*)
- softs->err_buf_dma_mem.virt_addr +
+ softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
@@ -112,7 +173,8 @@ void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs,
/*
* Process the error info for RAID IO in the case of failure.
*/
-void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
+void
+pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
rcb_t *rcb, uint16_t err_idx)
{
raid_path_error_info_elem_t *err_info = NULL;
@@ -120,7 +182,7 @@ void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
DBG_FUNC("IN");
err_info = (raid_path_error_info_elem_t*)
- softs->err_buf_dma_mem.virt_addr +
+ softs->err_buf_dma_mem.virt_addr +
err_idx;
if(err_info == NULL) {
@@ -136,7 +198,8 @@ void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs,
/*
* Process the Task Management function response.
*/
-int pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
+int
+pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
pqi_tmf_resp_t *tmf_resp)
{
int ret = REQUEST_SUCCESS;
@@ -153,7 +216,7 @@ int pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
ret = REQUEST_SUCCESS;
break;
default:
- DBG_ERR("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
+ DBG_WARN("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code);
ret = REQUEST_FAILED;
break;
}
@@ -165,21 +228,39 @@ int pqisrc_process_task_management_response(pqisrc_softstate_t *softs,
return ret;
}
+static int
+pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response)
+{
+
+ int ret = REQUEST_SUCCESS;
+
+ switch(response->status) {
+ case PQI_VENDOR_RESPONSE_IU_SUCCESS:
+ break;
+ case PQI_VENDOR_RESPONSE_IU_UNSUCCESS:
+ case PQI_VENDOR_RESPONSE_IU_INVALID_PARAM:
+ case PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC:
+ ret = REQUEST_FAILED;
+ break;
+ }
+
+ return ret;
+}
+
/*
* Function used to process the response from the adapter
* which is invoked by IRQ handler.
*/
-void
+void
pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
{
ob_queue_t *ob_q;
struct pqi_io_response *response;
uint32_t oq_pi, oq_ci;
+ pqi_scsi_dev_t *dvp = NULL;
DBG_FUNC("IN");
- OS_ATOMIC64_INC(softs, num_intrs);
-
ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */
oq_ci = ob_q->ci_local;
oq_pi = *(ob_q->pi_virt_addr);
@@ -190,18 +271,42 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
rcb_t *rcb = NULL;
uint32_t tag = 0;
uint32_t offset;
+ boolean_t os_scsi_cmd = false;
if (oq_pi == oq_ci)
break;
/* Get the response */
offset = oq_ci * ob_q->elem_size;
- response = (struct pqi_io_response *)(ob_q->array_virt_addr +
+ response = (struct pqi_io_response *)(ob_q->array_virt_addr +
offset);
tag = response->request_id;
rcb = &softs->rcb[tag];
- /* Make sure we are processing a valid response. */
- ASSERT(rcb->tag == tag && rcb->req_pending);
- rcb->req_pending = false;
+ /* Make sure we are processing a valid response. */
+ if ((rcb->tag != tag) || (rcb->req_pending == false)) {
+ DBG_ERR("No such request pending with tag : %x", tag);
+ oq_ci = (oq_ci + 1) % ob_q->num_elem;
+ break;
+ }
+		/* A timed-out request has now been completed by the firmware.
+		 * This should not be hit if the timeout is set to TIMEOUT_INFINITE
+		 * when calling pqisrc_wait_on_condition(softs, rcb, timeout).
+		 */
+ if (rcb->timedout) {
+ DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag %d\n", tag);
+ oq_ci = (oq_ci + 1) % ob_q->num_elem;
+ os_reset_rcb(rcb);
+ pqisrc_put_tag(&softs->taglist, tag);
+ break;
+ }
+
+ if (IS_OS_SCSICMD(rcb)) {
+ dvp = rcb->dvp;
+ if (dvp)
+ os_scsi_cmd = true;
+ else
+ DBG_WARN("Received IO completion for the Null device!!!\n");
+ }
+
DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type);
@@ -209,14 +314,24 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
rcb->success_cmp_callback(softs, rcb);
+ if (os_scsi_cmd)
+ pqisrc_decrement_device_active_io(softs, dvp);
+
break;
case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
rcb->error_cmp_callback(softs, rcb, LE_16(response->error_index));
+ if (os_scsi_cmd)
+ pqisrc_decrement_device_active_io(softs, dvp);
break;
case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
rcb->req_pending = false;
break;
+ case PQI_RESPONSE_IU_VENDOR_GENERAL:
+ rcb->req_pending = false;
+ rcb->status = pqisrc_process_vendor_general_response(
+ (pqi_vendor_general_response_t *)response);
+ break;
case PQI_RESPONSE_IU_TASK_MANAGEMENT:
rcb->status = pqisrc_process_task_management_response(softs, (void *)response);
break;
@@ -230,7 +345,7 @@ pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id)
}
ob_q->ci_local = oq_ci;
- PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
+ PCI_MEM_PUT32(softs, ob_q->ci_register_abs,
ob_q->ci_register_offset, ob_q->ci_local );
DBG_FUNC("OUT");
}
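The consumption loop above follows the usual PQI ring discipline: snapshot the
firmware-written producer index, walk the consumer index toward it one element
at a time dispatching on iu_type, then publish the new CI back to the
controller register. Stripped to a skeleton (using the same ob_queue_t fields
as the function above):

	/* Skeleton of the OQ drain loop. */
	uint32_t oq_ci = ob_q->ci_local;
	uint32_t oq_pi = *(ob_q->pi_virt_addr);  /* firmware-updated PI */
	while (oq_ci != oq_pi) {
		struct pqi_io_response *resp = (struct pqi_io_response *)
		    (ob_q->array_virt_addr + (size_t)oq_ci * ob_q->elem_size);
		/* ...look up rcb by resp->request_id, dispatch on iu_type... */
		oq_ci = (oq_ci + 1) % ob_q->num_elem;
	}
	ob_q->ci_local = oq_ci;  /* then write CI to the register */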
diff --git a/sys/dev/smartpqi/smartpqi_sis.c b/sys/dev/smartpqi/smartpqi_sis.c
index 71295e3de622..07bde7b54128 100644
--- a/sys/dev/smartpqi/smartpqi_sis.c
+++ b/sys/dev/smartpqi/smartpqi_sis.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +27,9 @@
#include "smartpqi_includes.h"
-/* */
-void sis_disable_msix(pqisrc_softstate_t *softs)
+/* Function for disabling MSI-X interrupts */
+void
+sis_disable_msix(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
@@ -44,25 +44,27 @@ void sis_disable_msix(pqisrc_softstate_t *softs)
DBG_FUNC("OUT\n");
}
-void sis_enable_intx(pqisrc_softstate_t *softs)
+void
+sis_enable_intx(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
DBG_FUNC("IN\n");
db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
- LEGACY_SIS_IDBR);
+ LEGACY_SIS_IDBR);
db_reg |= SIS_ENABLE_INTX;
PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, db_reg);
- if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX)
- != PQI_STATUS_SUCCESS) {
+ if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX)
+ != PQI_STATUS_SUCCESS) {
DBG_ERR("Failed to wait for enable intx db bit to clear\n");
}
DBG_FUNC("OUT\n");
}
-void sis_disable_intx(pqisrc_softstate_t *softs)
+void
+sis_disable_intx(pqisrc_softstate_t *softs)
{
uint32_t db_reg;
@@ -77,7 +79,8 @@ void sis_disable_intx(pqisrc_softstate_t *softs)
DBG_FUNC("OUT\n");
}
-void sis_disable_interrupt(pqisrc_softstate_t *softs)
+void
+sis_disable_interrupt(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN");
@@ -98,26 +101,29 @@ void sis_disable_interrupt(pqisrc_softstate_t *softs)
DBG_FUNC("OUT");
}
+
/* Trigger a NMI as part of taking controller offline procedure */
-void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs)
+void
+pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
- PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(TRIGGER_NMI_SIS));
DBG_FUNC("OUT\n");
}
/* Switch the adapter back to SIS mode during uninitialization */
-int pqisrc_reenable_sis(pqisrc_softstate_t *softs)
+int
+pqisrc_reenable_sis(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t timeout = SIS_ENABLE_TIMEOUT;
DBG_FUNC("IN\n");
- PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(REENABLE_SIS));
COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) &
@@ -126,13 +132,14 @@ int pqisrc_reenable_sis(pqisrc_softstate_t *softs)
DBG_WARN(" [ %s ] failed to re enable sis\n",__func__);
ret = PQI_STATUS_TIMEOUT;
}
-
+
DBG_FUNC("OUT\n");
return ret;
}
/* Validate the FW status PQI_CTRL_KERNEL_UP_AND_RUNNING */
-int pqisrc_check_fw_status(pqisrc_softstate_t *softs)
+int
+pqisrc_check_fw_status(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t timeout = SIS_STATUS_OK_TIMEOUT;
@@ -152,8 +159,8 @@ int pqisrc_check_fw_status(pqisrc_softstate_t *softs)
}
/* Function used to submit a SIS command to the adapter */
-static int pqisrc_send_sis_cmd(pqisrc_softstate_t *softs,
- uint32_t *mb)
+static int
+pqisrc_send_sis_cmd(pqisrc_softstate_t *softs, uint32_t *mb)
{
int ret = PQI_STATUS_SUCCESS;
int i = 0;
@@ -163,16 +170,18 @@ static int pqisrc_send_sis_cmd(pqisrc_softstate_t *softs,
DBG_FUNC("IN\n");
+
/* Copy Command to mailbox */
for (i = 0; i < 6; i++)
- PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i],
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i],
LEGACY_SIS_SRCV_MAILBOX+i*4, LE_32(mb[i]));
-
- PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr,
+
+	/* TODO: Switch to INTx mode? */
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr,
LEGACY_SIS_ODBR_R, LE_32(0x1000));
/* Submit the command */
- PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
+ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
LEGACY_SIS_IDBR, LE_32(SIS_CMD_SUBMIT));
#ifdef SIS_POLL_WAIT
@@ -214,7 +223,8 @@ err_out:
}
/* First SIS command for the adapter to check PQI support */
-int pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
+int
+pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
uint32_t *prop, uint32_t *ext_prop)
{
int ret = PQI_STATUS_SUCCESS;
@@ -236,7 +246,8 @@ int pqisrc_get_adapter_properties(pqisrc_softstate_t *softs,
}
/* Second SIS command to the adapter GET_COMM_PREFERRED_SETTINGS */
-int pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
+int
+pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
@@ -260,7 +271,8 @@ int pqisrc_get_preferred_settings(pqisrc_softstate_t *softs)
}
/* Get supported PQI capabilities from the adapter */
-int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
+int
+pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t mb[6] = {0};
@@ -276,6 +288,8 @@ int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
softs->pqi_cap.conf_tab_off = mb[4];
softs->pqi_cap.conf_tab_sz = mb[5];
+ os_update_dma_attributes(softs);
+
DBG_INIT("max_sg_elem = %x\n",
softs->pqi_cap.max_sg_elem);
DBG_INIT("max_transfer_size = %x\n",
@@ -289,7 +303,8 @@ int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs)
}
/* Send INIT STRUCT BASE ADDR - one of the SIS command */
-int pqisrc_init_struct_base(pqisrc_softstate_t *softs)
+int
+pqisrc_init_struct_base(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t elem_size = 0;
@@ -369,7 +384,8 @@ err_out:
* - GET_PQI_CAPABILITIES
* - INIT_STRUCT_BASE ADDR
*/
-int pqisrc_sis_init(pqisrc_softstate_t *softs)
+int
+pqisrc_sis_init(pqisrc_softstate_t *softs)
{
int ret = PQI_STATUS_SUCCESS;
uint32_t prop = 0;
@@ -421,34 +437,51 @@ int pqisrc_sis_init(pqisrc_softstate_t *softs)
goto err_out;
}
+	/* We need to allocate DMA memory here;
+	 * do any OS-specific DMA setup.
+	 */
+ ret = os_dma_setup(softs);
+ if (ret) {
+ DBG_ERR("Failed to Setup DMA\n");
+ goto err_out;
+ }
+
/* Init struct base addr */
ret = pqisrc_init_struct_base(softs);
if (ret) {
DBG_ERR("Failed to set init struct base addr\n");
- goto err_out;
+ goto err_dma;
}
+
DBG_FUNC("OUT\n");
return ret;
+err_dma:
+ os_dma_destroy(softs);
err_out:
DBG_FUNC("OUT failed\n");
return ret;
}
/* Deallocate the resources used during SIS initialization */
-void pqisrc_sis_uninit(pqisrc_softstate_t *softs)
+void
+pqisrc_sis_uninit(pqisrc_softstate_t *softs)
{
DBG_FUNC("IN\n");
os_dma_mem_free(softs, &softs->err_buf_dma_mem);
+
+ os_dma_destroy(softs);
os_resource_free(softs);
pqi_reset(softs);
+
DBG_FUNC("OUT\n");
}
-int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit)
+int
+pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit)
{
int rcode = PQI_STATUS_SUCCESS;
uint32_t db_reg;
diff --git a/sys/dev/smartpqi/smartpqi_structures.h b/sys/dev/smartpqi/smartpqi_structures.h
index df2cec45527a..eb1394d58f39 100644
--- a/sys/dev/smartpqi/smartpqi_structures.h
+++ b/sys/dev/smartpqi/smartpqi_structures.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -61,7 +60,7 @@ struct bmic_host_wellness_time {
struct pqi_dev_adminq_cap {
uint8_t max_admin_ibq_elem;
uint8_t max_admin_obq_elem;
- uint8_t admin_ibq_elem_len;
+ uint8_t admin_ibq_elem_len;
uint8_t admin_obq_elem_len;
uint16_t max_pqi_dev_reset_tmo;
uint8_t res[2];
@@ -91,7 +90,7 @@ struct pqi_registers {
uint64_t admin_obq_elem_array_addr;
uint64_t admin_ibq_ci_addr;
uint64_t admin_obq_pi_addr;
- uint32_t admin_q_param;
+ uint32_t admin_q_param;
uint8_t res3[4];
uint32_t pqi_dev_err;
uint8_t res4[4];
@@ -107,15 +106,15 @@ struct pqi_registers {
*/
struct ioa_registers {
- uint8_t res1[0x18];
+ uint8_t res1[0x18];
uint32_t host_to_ioa_db_mask_clr; /* 18h */
- uint8_t res2[4];
+ uint8_t res2[4];
uint32_t host_to_ioa_db; /* 20h */
uint8_t res3[4];
uint32_t host_to_ioa_db_clr; /* 28h */
uint8_t res4[8];
uint32_t ioa_to_host_glob_int_mask; /* 34h */
- uint8_t res5[0x64];
+ uint8_t res5[0x64];
uint32_t ioa_to_host_db; /* 9Ch */
uint32_t ioa_to_host_db_clr; /* A0h */
uint8_t res6[4];
@@ -131,6 +130,7 @@ struct ioa_registers {
uint32_t mb[8]; /* 1000h */
}OS_ATTRIBUTE_PACKED;
+
/* PQI Preferred settings */
struct pqi_pref_settings {
uint16_t max_cmd_size;
@@ -185,10 +185,10 @@ typedef union pqi_reset_reg {
uint32_t all_bits;
}pqi_reset_reg_t;
-/* Memory descriptor for DMA memory allocation */
+/* Memory descriptor for DMA memory allocation */
typedef struct dma_mem {
void *virt_addr;
- dma_addr_t dma_addr;
+ dma_addr_t dma_addr;
uint32_t size;
uint32_t align;
char *tag;
@@ -222,9 +222,10 @@ union head_list {
};
/* lock-free stack used to push and pop the tag used for IO request */
typedef struct lockless_stack {
- uint32_t *next_index_array;
- uint32_t num_elements;
- volatile union head_list head OS_ATTRIBUTE_ALIGNED(8);
+ uint32_t *next_index_array;
+	uint32_t	max_elem;	/* No. of total elements */
+	uint32_t	num_elem;	/* No. of present elements */
+ volatile union head_list head OS_ATTRIBUTE_ALIGNED(8);
}lockless_stack_t;
#endif /* LOCKFREE_STACK */
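With the renamed fields, max_elem is the stack's fixed capacity while num_elem
tracks current occupancy, and the 8-byte-aligned head can pack an index plus a
sequence number so a pop can use one 64-bit compare-and-swap without ABA
hazards. A heavily hedged sketch of the pop step (head_list's members and the
CAS wrapper below are assumed names, not taken from this change):

	/* Lock-free pop with an ABA counter; member names are assumptions. */
	union head_list old_head, new_head;
	do {
		old_head = stack->head;
		if (old_head.index == 0)
			return INVALID_ELEM;             /* stack empty */
		new_head.seq_no = old_head.seq_no + 1;   /* defeat ABA reuse */
		new_head.index  = stack->next_index_array[old_head.index];
	} while (!OS_ATOMIC64_CAS(&stack->head, old_head.all_bits,
	    new_head.all_bits));                         /* assumed CAS wrapper */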
@@ -248,18 +249,19 @@ typedef struct sgl_descriptor
uint64_t addr; /* !< Bytes 0-7. The starting 64-bit memory byte address of the data block. */
uint32_t length; /* !< Bytes 8-11. The length in bytes of the data block. Set to 0x00000000 specifies that no data be transferred. */
uint8_t res[3]; /* !< Bytes 12-14. */
- uint8_t zero : 4; /* !< Byte 15, Bits 0-3. */
- uint8_t type : 4; /* !< Byte 15, Bits 4-7. sgl descriptor type */
+ uint8_t zero : 4; /* !< Byte 15, Bits 0-3. */
+ uint8_t type : 4; /* !< Byte 15, Bits 4-7. sgl descriptor type */
} sg_desc_t;
/* PQI IUs */
-typedef struct iu_header
+typedef struct iu_header
{
uint8_t iu_type;
uint8_t comp_feature;
uint16_t iu_length;
}OS_ATTRIBUTE_PACKED iu_header_t;
+
typedef struct general_admin_request /* REPORT_PQI_DEVICE_CAPABILITY, REPORT_MANUFACTURER_INFO, REPORT_OPERATIONAL_IQ, REPORT_OPERATIONAL_OQ all same layout. */
{
iu_header_t header; /* !< Bytes 0-3. */
@@ -271,7 +273,7 @@ typedef struct general_admin_request /* REPORT_PQI_DEVICE_CAPABILITY, REPORT_MAN
struct {
uint8_t res2[33]; /* !< Bytes 11-43. function specific */
uint32_t buf_size; /* !< Bytes 44-47. size in bytes of the Data-In/Out Buffer */
- sg_desc_t sg_desc; /* !< Bytes 48-63. SGL */
+ sg_desc_t sg_desc; /* !< Bytes 48-63. SGL */
} OS_ATTRIBUTE_PACKED general_func;
struct {
@@ -323,6 +325,7 @@ typedef struct general_admin_request /* REPORT_PQI_DEVICE_CAPABILITY, REPORT_MAN
}OS_ATTRIBUTE_PACKED gen_adm_req_iu_t;
+
typedef struct general_admin_response {
iu_header_t header;
uint16_t res1;
@@ -349,15 +352,15 @@ typedef struct general_admin_response {
typedef struct pqi_event_config_request {
iu_header_t header;
- uint16_t response_queue_id; /* specifies the OQ where the response
- IU is to be delivered */
- uint8_t work_area[2]; /* reserved for driver use */
- uint16_t request_id;
+ uint16_t response_queue_id; /* specifies the OQ where the response
+ IU is to be delivered */
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
union {
- uint16_t reserved; /* Report event config iu */
+ uint16_t reserved; /* Report event config iu */
uint16_t global_event_oq_id; /* Set event config iu */
}iu_specific;
- uint32_t buffer_length;
+ uint32_t buffer_length;
sg_desc_t sg_desc;
}pqi_event_config_request_t;
#if 0
@@ -372,9 +375,9 @@ typedef struct pqi_set_event_config_request {
sg_desc_t sg_desc;
}pqi_set_event_config_request_t;
#endif
-
+
/* Report/Set event config data-in/data-out buffer structure */
-
+
#define PQI_MAX_EVENT_DESCRIPTORS 255
struct pqi_event_descriptor {
@@ -382,7 +385,7 @@ struct pqi_event_descriptor {
uint8_t reserved;
uint16_t oq_id;
};
-
+
typedef struct pqi_event_config {
uint8_t reserved[2];
uint8_t num_event_descriptors;
@@ -402,7 +405,7 @@ typedef struct pqi_management_response{
}pqi_management_response_t;
/*Event response IU*/
typedef struct pqi_event_response {
- iu_header_t header;
+ iu_header_t header;
uint16_t reserved1;
uint8_t work_area[2];
uint8_t event_type;
@@ -415,7 +418,7 @@ typedef struct pqi_event_response {
/*event acknowledge IU*/
typedef struct pqi_event_acknowledge_request {
- iu_header_t header;
+ iu_header_t header;
uint16_t reserved1;
uint8_t work_area[2];
uint8_t event_type;
@@ -431,20 +434,114 @@ struct pqi_event {
uint32_t additional_event_id;
};
+typedef struct pqi_vendor_general_request {
+ iu_header_t header;
+ uint16_t response_queue_id;
+ uint8_t work_area[2];
+ uint16_t request_id;
+ uint16_t function_code;
+ union {
+ struct {
+ uint16_t first_section;
+ uint16_t last_section;
+ uint8_t reserved1[48];
+ } OS_ATTRIBUTE_PACKED config_table_update;
+
+ struct {
+ uint64_t buffer_address;
+ uint32_t buffer_length;
+ uint8_t reserved2[40];
+ } OS_ATTRIBUTE_PACKED ofa_memory_allocation;
+ } data;
+} OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t;
+
+typedef struct pqi_vendor_general_response {
+ iu_header_t header;
+ uint16_t reserved1;
+ uint8_t work_area[2];
+ uint16_t request_id;
+ uint16_t function_code;
+ uint16_t status;
+ uint8_t reserved2[2];
+} OS_ATTRIBUTE_PACKED pqi_vendor_general_response_t;
+
typedef struct op_q_params
{
- uint8_t fn_code;
- uint16_t qid;
+ uint8_t fn_code;
+ uint16_t qid;
uint16_t num_elem;
uint16_t elem_len;
uint16_t int_msg_num;
-
+
} OS_ATTRIBUTE_PACKED op_q_params;
-/* Driver will use this structure to interpret the error
+/* "Fixed Format Sense Data" (0x70 or 0x71) (Table 45 in SPC5) */
+typedef struct sense_data_fixed {
+ uint8_t response_code : 7; // Byte 0, 0x70 or 0x71
+ uint8_t valid : 1; // Byte 0, bit 7
+ uint8_t byte_1; // Byte 1
+ uint8_t sense_key : 4; // Byte 2, bit 0-3 (Key)
+ uint8_t byte_2_other : 4; // Byte 2, bit 4-7
+ uint32_t information; // Byte 3-6, big-endian like block # in CDB
+ uint8_t addtnl_length; // Byte 7
+ uint8_t cmd_specific[4]; // Byte 8-11
+ uint8_t sense_code; // Byte 12 (ASC)
+ uint8_t sense_qual; // Byte 13 (ASCQ)
+ uint8_t fru_code; // Byte 14
+ uint8_t sense_key_specific[3]; // Byte 15-17
+ uint8_t addtnl_sense[1]; // Byte 18+
+} OS_ATTRIBUTE_PACKED sense_data_fixed_t;
+
+
+/* Generic Sense Data Descriptor (Table 29 in SPC5) */
+typedef struct descriptor_entry
+{
+ uint8_t desc_type; // Byte 9/0
+ uint8_t desc_type_length; // Byte 10/1
+ union
+ {
+ /* Sense data descriptor specific */
+ uint8_t bytes[1];
+
+		/* Information (Type 0) (Table 31 in SPC5) */
+ struct {
+ uint8_t byte_2_rsvd : 7; // Byte 11/2
+ uint8_t valid : 1; // Byte 11/2, bit 7
+ uint8_t byte_3; // Byte 12/3
+ uint8_t information[8]; // Byte 13-20/4-11
+ } OS_ATTRIBUTE_PACKED type_0;
+
+ }u;
+} OS_ATTRIBUTE_PACKED descriptor_entry_t;
+
+/* "Descriptor Format Sense Data" (0x72 or 0x73) (Table 28 in SPC5) */
+typedef struct sense_data_descriptor {
+ uint8_t response_code : 7; // Byte 0, 0x72 or 0x73
+ uint8_t byte_0_rsvd: 1; // Byte 0, bit 7
+ uint8_t sense_key : 4; // Byte 1, bit 0-3 (Key)
+ uint8_t byte_1_other : 4; // Byte 1, bit 4-7
+ uint8_t sense_code; // Byte 2 (ASC)
+ uint8_t sense_qual; // Byte 3 (ASCQ)
+ uint8_t byte4_6[3]; // Byte 4-6
+ uint8_t more_length; // Byte 7
+ descriptor_entry_t descriptor_list; // Bytes 8+
+
+} OS_ATTRIBUTE_PACKED sense_data_descriptor_t;
+
+typedef union sense_data_u
+{
+ sense_data_fixed_t fixed_format;
+ sense_data_descriptor_t descriptor_format;
+ uint8_t data[256];
+} sense_data_u_t;
+
+
+
+
+/* The driver will use this structure to interpret the error
 info element returned from failed requests */
typedef struct raid_path_error_info_elem {
- uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
+ uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */
uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */
uint8_t reserved[3]; /* !< Bytes 2-4. */
uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40. Defined in Storport.h */
@@ -453,29 +550,33 @@ typedef struct raid_path_error_info_elem {
uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */
	uint32_t data_in_transferred; /* !< Bytes 12-15. If "data_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in the Data-In buffer; otherwise ignored. */
	uint32_t data_out_transferred; /* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", indicates the number of contiguous bytes starting with offset zero in the Data-Out buffer; otherwise ignored. */
- uint8_t data[256]; /* !< Bytes 20-275. Response Data buffer or Sense Data buffer but not both. */
+ union
+ {
+ sense_data_u_t sense_data;
+ uint8_t data[256]; /* !< Bytes 20-275. Response Data buffer or Sense Data buffer but not both. */
+ };
}OS_ATTRIBUTE_PACKED raid_path_error_info_elem_t;
#define PQI_ERROR_BUFFER_ELEMENT_LENGTH sizeof(raid_path_error_info_elem_t)
-typedef enum error_data_present
+typedef enum error_data_present
{
- DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
+ DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */
DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */
DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */
} error_data_present_t;
-typedef struct aio_path_error_info_elem
+typedef struct aio_path_error_info_elem
{
uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40. Defined in Storport.h */
- uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
+ uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */
uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */
- uint8_t reserved1; /* !< Byte 3. Reserved. */
+ uint8_t reserved1; /* !< Byte 3. Reserved. */
uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */
- uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
+ uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */
uint16_t reserved2; /* !< Bytes 10. Reserved. */
uint8_t data[256]; /* !< Bytes 11-267. Response data buffer or Sense data buffer but not both. */
- uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
+ uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */
}OS_ATTRIBUTE_PACKED aio_path_error_info_elem_t;
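A short sketch of how data_pres selects the meaning of the trailing data[] buffer, using the error_data_present_t values defined above; the case bodies are placeholders:

/* Sketch: decide how to read aio_path_error_info_elem_t.data[]. */
static void
handle_aio_error(const aio_path_error_info_elem_t *e)
{
	switch (e->data_pres) {
	case DATA_PRESENT_SENSE_DATA:
		/* data[0 .. data_len-1] holds SCSI sense data */
		break;
	case DATA_PRESENT_RESPONSE_DATA:
		/* data[0 .. data_len-1] holds SOP response data */
		break;
	case DATA_PRESENT_NO_DATA:
	default:
		/* nothing valid in data[] */
		break;
	}
}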
struct init_base_struct {
@@ -485,7 +586,7 @@ struct init_base_struct {
uint32_t err_buf_paddr_h; /* upper 32 bits of physical address of error buffer */
uint32_t err_buf_elem_len; /* length of each element in error buffer (in bytes) */
uint32_t err_buf_num_elem; /* number of elements in error buffer */
-}OS_ATTRIBUTE_PACKED;
+}OS_ATTRIBUTE_PACKED;
/* Queue details */
typedef struct ib_queue {
@@ -526,6 +627,7 @@ typedef struct pqisrc_sg_desc{
uint32_t flags;
}sgt_t;
+
typedef struct pqi_iu_layer_desc {
uint8_t ib_spanning_supported : 1;
uint8_t res1 : 7;
@@ -537,6 +639,7 @@ typedef struct pqi_iu_layer_desc {
uint16_t max_ob_iu_len;
}OS_ATTRIBUTE_PACKED pqi_iu_layer_desc_t;
+
/* Response IU data */
typedef struct pqi_device_capabilities {
uint16_t length;
@@ -597,40 +700,44 @@ typedef struct pqi_aio_req {
sgt_t sg_desc[4];
}OS_ATTRIBUTE_PACKED pqi_aio_req_t;
+
typedef struct pqisrc_raid_request {
- iu_header_t header;
- uint16_t response_queue_id; /* specifies the OQ where the response
+ iu_header_t header;
+ uint16_t response_queue_id; /* specifies the OQ where the response
IU is to be delivered */
- uint8_t work_area[2]; /* reserved for driver use */
- uint16_t request_id;
- uint16_t nexus_id;
- uint32_t buffer_length;
- uint8_t lun_number[8];
- uint16_t protocol_spec;
- uint8_t data_direction : 2;
- uint8_t partial : 1;
- uint8_t reserved1 : 4;
- uint8_t fence : 1;
- uint16_t error_index;
- uint8_t reserved2;
- uint8_t task_attribute : 3;
- uint8_t command_priority : 4;
- uint8_t reserved3 : 1;
- uint8_t reserved4 : 2;
- uint8_t additional_cdb_bytes_usage : 3;
- uint8_t reserved5 : 3;
- uint8_t cdb[16];
- uint8_t additional_cdb_bytes[16];
- sgt_t sg_descriptors[4];
-}OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
-
-typedef struct pqi_tmf_req {
+ uint8_t work_area[2]; /* reserved for driver use */
+ uint16_t request_id;
+ uint16_t nexus_id;
+ uint32_t buffer_length;
+ uint8_t lun_number[8];
+ uint16_t protocol_spec;
+ uint8_t data_direction : 2;
+ uint8_t partial : 1;
+ uint8_t reserved1 : 4;
+ uint8_t fence : 1;
+ uint16_t error_index;
+ uint8_t reserved2;
+ uint8_t task_attribute : 3;
+ uint8_t command_priority : 4;
+ uint8_t reserved3 : 1;
+ uint8_t reserved4 : 2;
+ uint8_t additional_cdb_bytes_usage : 3;
+ uint8_t reserved5 : 3;
+ uint8_t cdb[16];
+ uint8_t reserved[12];
+ uint32_t timeout_in_sec;
+ sgt_t sg_descriptors[4];
+} OS_ATTRIBUTE_PACKED pqisrc_raid_req_t;
+
+
+typedef struct pqi_raid_tmf_req {
iu_header_t header;
uint16_t resp_qid;
uint8_t work_area[2];
uint16_t req_id;
uint16_t nexus;
- uint8_t res1[4];
+ uint8_t res1[2];
+ uint16_t timeout_in_sec;
uint8_t lun[8];
uint16_t protocol_spec;
uint16_t obq_id_to_manage;
@@ -638,7 +745,22 @@ typedef struct pqi_tmf_req {
uint8_t tmf;
uint8_t res2 : 7;
uint8_t fence : 1;
-}OS_ATTRIBUTE_PACKED pqi_tmf_req_t;
+} OS_ATTRIBUTE_PACKED pqi_raid_tmf_req_t;
+
+typedef struct pqi_aio_tmf_req {
+ iu_header_t header;
+ uint16_t resp_qid;
+ uint8_t work_area[2];
+ uint16_t req_id;
+ uint16_t res1;
+ uint32_t nexus;
+ uint8_t lun[8];
+ uint32_t req_id_to_manage;
+ uint8_t tmf;
+ uint8_t res2 : 7;
+ uint8_t fence : 1;
+ uint16_t error_idx;
+}OS_ATTRIBUTE_PACKED pqi_aio_tmf_req_t;
typedef struct pqi_tmf_resp {
iu_header_t header;
@@ -650,112 +772,176 @@ typedef struct pqi_tmf_resp {
uint8_t resp_code;
}pqi_tmf_resp_t;
+
struct pqi_io_response {
iu_header_t header;
uint16_t queue_id;
- uint8_t work_area[2];
+ uint8_t work_area[2];
uint16_t request_id;
uint16_t error_index;
uint8_t reserved[4];
}OS_ATTRIBUTE_PACKED;
+
struct pqi_enc_info {
uint16_t data_enc_key_index;
uint32_t encrypt_tweak_lower;
uint32_t encrypt_tweak_upper;
};
+
typedef struct pqi_scsi_device {
	device_type_t devtype;		/* as reported by INQUIRY command */
- uint8_t device_type; /* as reported by
+ uint8_t device_type; /* as reported by
BMIC_IDENTIFY_PHYSICAL_DEVICE - only
valid for devtype = TYPE_DISK */
- int bus;
- int target;
- int lun;
- uint8_t flags;
- uint8_t scsi3addr[8];
+ int bus;
+ int target;
+ int lun;
+ uint8_t flags;
+ uint8_t scsi3addr[8];
uint64_t wwid;
- uint8_t is_physical_device : 1;
- uint8_t is_external_raid_device : 1;
- uint8_t target_lun_valid : 1;
- uint8_t expose_device : 1;
- uint8_t no_uld_attach : 1;
- uint8_t is_obdr_device : 1;
- uint8_t aio_enabled : 1;
- uint8_t device_gone : 1;
- uint8_t new_device : 1;
- uint8_t volume_offline : 1;
- uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
- uint8_t model[16]; /* bytes 16-31 of inquiry data */
+ uint8_t is_physical_device : 1;
+ uint8_t is_external_raid_device : 1;
+ uint8_t target_lun_valid : 1;
+ uint8_t expose_device : 1;
+ uint8_t no_uld_attach : 1;
+ uint8_t is_obdr_device : 1;
+ uint8_t aio_enabled : 1;
+ uint8_t device_gone : 1;
+ uint8_t new_device : 1;
+ uint8_t volume_offline : 1;
+ uint8_t scsi_rescan : 1;
+ uint8_t vendor[8]; /* bytes 8-15 of inquiry data */
+ uint8_t model[16]; /* bytes 16-31 of inquiry data */
uint64_t sas_address;
- uint8_t raid_level;
+ uint8_t raid_level;
uint16_t queue_depth; /* max. queue_depth for this device */
uint16_t advertised_queue_depth;
uint32_t ioaccel_handle;
- uint8_t volume_status;
- uint8_t active_path_index;
- uint8_t path_map;
- uint8_t bay;
- uint8_t box[8];
+ uint8_t volume_status;
+ uint8_t active_path_index;
+ uint8_t path_map;
+ uint8_t bay;
+ uint8_t box[8];
uint16_t phys_connector[8];
- int offload_config; /* I/O accel RAID offload configured */
- int offload_enabled; /* I/O accel RAID offload enabled */
- int offload_enabled_pending;
- int offload_to_mirror; /* Send next I/O accelerator RAID
- offload request to mirror drive. */
+ int offload_config; /* I/O accel RAID offload configured */
+ int offload_enabled; /* I/O accel RAID offload enabled */
+ int offload_enabled_pending;
+ int offload_to_mirror; /* Send next I/O accelerator RAID
+ offload request to mirror drive. */
struct raid_map *raid_map; /* I/O accelerator RAID map */
- int reset_in_progress;
+
+ int reset_in_progress;
+ int logical_unit_number;
os_dev_info_t *dip; /*os specific scsi device information*/
- boolean_t invalid;
+ boolean_t invalid;
+ boolean_t path_destroyed;
+ boolean_t firmware_queue_depth_set;
+ OS_ATOMIC64_T active_requests;
}pqi_scsi_dev_t;
+typedef struct pqisrc_softstate pqisrc_softstate_t;
+typedef struct pqi_firmware_feature pqi_firmware_feature_t;
+typedef void (*feature_status_fn)(pqisrc_softstate_t *softs,
+ pqi_firmware_feature_t *firmware_feature);
+
+struct pqi_firmware_feature {
+ char *feature_name;
+ unsigned int feature_bit;
+ boolean_t supported;
+ boolean_t enabled;
+ feature_status_fn feature_status;
+};
+
+struct pqi_conf_table_firmware_features {
+ struct pqi_conf_table_section_header header;
+ uint16_t num_elements;
+ uint8_t features_supported[];
+};
+
+struct pqi_conf_table_section_info {
+ struct pqisrc_softstate *softs;
+ void *section;
+ uint32_t section_offset;
+ void *section_addr;
+};
+
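features_supported[] is a byte-packed bitmask with num_elements bytes; a sketch of the usual bit test follows (the byte/bit split is an assumption matching common firmware-features handling, not spelled out in this hunk):

/* Sketch: test one feature bit in the firmware-features section.
 * Layout assumption: bit N lives in features_supported[N / 8]. */
static boolean_t
feature_bit_supported(const struct pqi_conf_table_firmware_features *ft,
	unsigned int bit)
{
	unsigned int byte = bit / 8;

	if (byte >= ft->num_elements)
		return (false);
	return ((ft->features_supported[byte] >> (bit % 8)) & 1);
}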
struct sense_header_scsi { /* See SPC-3 section 4.5 */
- uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
- uint8_t sense_key;
- uint8_t asc;
- uint8_t ascq;
- uint8_t byte4;
- uint8_t byte5;
- uint8_t byte6;
- uint8_t additional_length; /* always 0 for fixed sense format */
+ uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+ uint8_t byte4;
+ uint8_t byte5;
+ uint8_t byte6;
+ uint8_t additional_length; /* always 0 for fixed sense format */
}OS_ATTRIBUTE_PACKED;
+
+
typedef struct report_lun_header {
- uint32_t list_length;
- uint8_t extended_response;
- uint8_t reserved[3];
+ uint32_t list_length;
+ uint8_t extended_response;
+ uint8_t reserved[3];
}OS_ATTRIBUTE_PACKED reportlun_header_t;
+
typedef struct report_lun_ext_entry {
- uint8_t lunid[8];
- uint64_t wwid;
- uint8_t device_type;
- uint8_t device_flags;
- uint8_t lun_count; /* number of LUNs in a multi-LUN device */
- uint8_t redundant_paths;
- uint32_t ioaccel_handle;
+ uint8_t lunid[8];
+ uint64_t wwid;
+ uint8_t device_type;
+ uint8_t device_flags;
+ uint8_t lun_count; /* number of LUNs in a multi-LUN device */
+ uint8_t redundant_paths;
+ uint32_t ioaccel_handle;
}OS_ATTRIBUTE_PACKED reportlun_ext_entry_t;
+
typedef struct report_lun_data_ext {
- reportlun_header_t header;
- reportlun_ext_entry_t lun_entries[1];
+ reportlun_header_t header;
+ reportlun_ext_entry_t lun_entries[1];
}OS_ATTRIBUTE_PACKED reportlun_data_ext_t;
+typedef struct reportlun_queue_depth_entry {
+ uint8_t logical_unit_num;
+ uint8_t reserved_1:6;
+ uint8_t address:2;
+ uint8_t box_bus_num;
+ uint8_t reserved_2:6;
+ uint8_t mode:2;
+ uint8_t bus_ident;
+
+ /* Byte 5 */
+ uint8_t queue_depth:7;
+ uint8_t multiplier:1;
+
+ /* Byte 6 */
+ uint8_t drive_type_mix_flags;
+ uint8_t level_2_bus:6;
+ uint8_t level_2_mode:2;
+ uint8_t unused_bytes[16];
+}OS_ATTRIBUTE_PACKED reportlun_queue_depth_entry_t;
+
+typedef struct reportlun_queue_depth_data {
+ reportlun_header_t header;
+	reportlun_queue_depth_entry_t lun_entries[1]; /* LUN list with queue depth values for each LUN */
+}OS_ATTRIBUTE_PACKED reportlun_queue_depth_data_t;
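As with the other report-LUN structures, header.list_length is assumed here to be a big-endian byte count of the entries that follow; a sketch of deriving the number of queue-depth entries under that assumption:

#include <sys/endian.h>

/* Sketch: number of entries in a queue-depth report.
 * Assumes list_length is big-endian, as in other report-LUN data. */
static unsigned int
queue_depth_entry_count(const reportlun_queue_depth_data_t *rd)
{
	uint32_t nbytes = be32toh(rd->header.list_length);

	return (nbytes / sizeof(reportlun_queue_depth_entry_t));
}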
+
typedef struct raidmap_data {
- uint32_t ioaccel_handle;
- uint8_t xor_mult[2];
- uint8_t reserved[2];
+ uint32_t ioaccel_handle;
+ uint8_t xor_mult[2];
+ uint8_t reserved[2];
}OS_ATTRIBUTE_PACKED raidmap_data_t;
typedef struct raid_map {
uint32_t structure_size; /* size of entire structure in bytes */
uint32_t volume_blk_size; /* bytes / block in the volume */
uint64_t volume_blk_cnt; /* logical blocks on the volume */
- uint8_t phys_blk_shift; /* shift factor to convert between
+ uint8_t phys_blk_shift; /* shift factor to convert between
units of logical blocks and physical
disk blocks */
- uint8_t parity_rotation_shift; /* shift factor to convert between units
+ uint8_t parity_rotation_shift; /* shift factor to convert between units
of logical stripes and physical
stripes */
uint16_t strip_size; /* blocks used on each disk / stripe */
@@ -769,10 +955,11 @@ typedef struct raid_map {
group) */
uint16_t flags;
uint16_t data_encryption_key_index;
- uint8_t reserved[16];
- raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
+ uint8_t reserved[16];
+ raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES];
}OS_ATTRIBUTE_PACKED pqisrc_raid_map_t;
+
typedef struct bmic_ident_ctrl {
uint8_t conf_ld_count;
uint32_t conf_sign;
@@ -789,79 +976,79 @@ typedef struct bmic_ident_ctrl {
}OS_ATTRIBUTE_PACKED bmic_ident_ctrl_t;
typedef struct bmic_identify_physical_device {
- uint8_t scsi_bus; /* SCSI Bus number on controller */
- uint8_t scsi_id; /* SCSI ID on this bus */
+ uint8_t scsi_bus; /* SCSI Bus number on controller */
+ uint8_t scsi_id; /* SCSI ID on this bus */
uint16_t block_size; /* sector size in bytes */
uint32_t total_blocks; /* number for sectors on drive */
uint32_t reserved_blocks; /* controller reserved (RIS) */
- uint8_t model[40]; /* Physical Drive Model */
- uint8_t serial_number[40]; /* Drive Serial Number */
- uint8_t firmware_revision[8]; /* drive firmware revision */
- uint8_t scsi_inquiry_bits; /* inquiry byte 7 bits */
- uint8_t compaq_drive_stamp; /* 0 means drive not stamped */
- uint8_t last_failure_reason;
- uint8_t flags;
- uint8_t more_flags;
- uint8_t scsi_lun; /* SCSI LUN for phys drive */
- uint8_t yet_more_flags;
- uint8_t even_more_flags;
+ uint8_t model[40]; /* Physical Drive Model */
+ uint8_t serial_number[40]; /* Drive Serial Number */
+ uint8_t firmware_revision[8]; /* drive firmware revision */
+ uint8_t scsi_inquiry_bits; /* inquiry byte 7 bits */
+ uint8_t compaq_drive_stamp; /* 0 means drive not stamped */
+ uint8_t last_failure_reason;
+ uint8_t flags;
+ uint8_t more_flags;
+ uint8_t scsi_lun; /* SCSI LUN for phys drive */
+ uint8_t yet_more_flags;
+ uint8_t even_more_flags;
uint32_t spi_speed_rules;
- uint8_t phys_connector[2]; /* connector number on controller */
- uint8_t phys_box_on_bus; /* phys enclosure this drive resides */
- uint8_t phys_bay_in_box; /* phys drv bay this drive resides */
+ uint8_t phys_connector[2]; /* connector number on controller */
+ uint8_t phys_box_on_bus; /* phys enclosure this drive resides */
+ uint8_t phys_bay_in_box; /* phys drv bay this drive resides */
uint32_t rpm; /* drive rotational speed in RPM */
- uint8_t device_type; /* type of drive */
- uint8_t sata_version; /* only valid when device_type =
+ uint8_t device_type; /* type of drive */
+ uint8_t sata_version; /* only valid when device_type =
BMIC_DEVICE_TYPE_SATA */
uint64_t big_total_block_count;
uint64_t ris_starting_lba;
uint32_t ris_size;
- uint8_t wwid[20];
- uint8_t controller_phy_map[32];
+ uint8_t wwid[20];
+ uint8_t controller_phy_map[32];
uint16_t phy_count;
- uint8_t phy_connected_dev_type[256];
- uint8_t phy_to_drive_bay_num[256];
+ uint8_t phy_connected_dev_type[256];
+ uint8_t phy_to_drive_bay_num[256];
uint16_t phy_to_attached_dev_index[256];
- uint8_t box_index;
- uint8_t reserved;
+ uint8_t box_index;
+ uint8_t reserved;
uint16_t extra_physical_drive_flags;
- uint8_t negotiated_link_rate[256];
- uint8_t phy_to_phy_map[256];
- uint8_t redundant_path_present_map;
- uint8_t redundant_path_failure_map;
- uint8_t active_path_number;
+ uint8_t negotiated_link_rate[256];
+ uint8_t phy_to_phy_map[256];
+ uint8_t redundant_path_present_map;
+ uint8_t redundant_path_failure_map;
+ uint8_t active_path_number;
uint16_t alternate_paths_phys_connector[8];
- uint8_t alternate_paths_phys_box_on_port[8];
- uint8_t multi_lun_device_lun_count;
- uint8_t minimum_good_fw_revision[8];
- uint8_t unique_inquiry_bytes[20];
- uint8_t current_temperature_degreesC;
- uint8_t temperature_threshold_degreesC;
- uint8_t max_temperature_degreesC;
- uint8_t logical_blocks_per_phys_block_exp;
+ uint8_t alternate_paths_phys_box_on_port[8];
+ uint8_t multi_lun_device_lun_count;
+ uint8_t minimum_good_fw_revision[8];
+ uint8_t unique_inquiry_bytes[20];
+ uint8_t current_temperature_degreesC;
+ uint8_t temperature_threshold_degreesC;
+ uint8_t max_temperature_degreesC;
+ uint8_t logical_blocks_per_phys_block_exp;
uint16_t current_queue_depth_limit;
- uint8_t switch_name[10];
+ uint8_t switch_name[10];
uint16_t switch_port;
- uint8_t alternate_paths_switch_name[40];
- uint8_t alternate_paths_switch_port[8];
+ uint8_t alternate_paths_switch_name[40];
+ uint8_t alternate_paths_switch_port[8];
uint16_t power_on_hours;
uint16_t percent_endurance_used;
- uint8_t drive_authentication;
- uint8_t smart_carrier_authentication;
- uint8_t smart_carrier_app_fw_version;
- uint8_t smart_carrier_bootloader_fw_version;
- uint8_t encryption_key_name[64];
+ uint8_t drive_authentication;
+ uint8_t smart_carrier_authentication;
+ uint8_t smart_carrier_app_fw_version;
+ uint8_t smart_carrier_bootloader_fw_version;
+ uint8_t encryption_key_name[64];
uint32_t misc_drive_flags;
uint16_t dek_index;
uint8_t padding[112];
}OS_ATTRIBUTE_PACKED bmic_ident_physdev_t;
typedef struct pqisrc_bmic_flush_cache {
- uint8_t disable_cache;
- uint8_t power_action;
- uint8_t ndu_flush_cache;
- uint8_t halt_event;
- uint8_t reserved[28];
+ uint8_t disable_cache;
+ uint8_t power_action;
+ uint8_t ndu_flush_cache;
+ uint8_t halt_event;
+ uint8_t reserved[28];
} OS_ATTRIBUTE_PACKED pqisrc_bmic_flush_cache_t;
/* for halt_event member of pqisrc_bmic_flush_cache_t */
@@ -873,7 +1060,6 @@ enum pqisrc_flush_cache_event_type {
PQISRC_RESTART = 4
};
-struct pqisrc_softstate;
struct request_container_block;
typedef void (*success_callback)(struct pqisrc_softstate *, struct request_container_block *);
typedef void (*error_callback)(struct pqisrc_softstate *, struct request_container_block *, uint16_t);
@@ -897,14 +1083,20 @@ typedef struct request_container_block {
uint32_t ioaccel_handle;
boolean_t encrypt_enable;
struct pqi_enc_info enc_info;
+ ib_queue_t *req_q;
+ int path;
+ int resp_qid;
+ boolean_t req_pending;
+ boolean_t timedout;
+ int tm_req;
+ int aio_retry;
int cm_flags;
void *cm_data; /* pointer to data in kernel space */
bus_dmamap_t cm_datamap;
uint32_t nseg;
union ccb *cm_ccb;
sgt_t *sgt; /* sg table */
- int resp_qid;
- boolean_t req_pending;
+
}rcb_t;
typedef struct tid_pool {
@@ -912,12 +1104,12 @@ typedef struct tid_pool {
int index;
}tid_pool_t;
-typedef struct pqisrc_softstate {
- OS_SPECIFIC_T os_specific;
- struct ioa_registers *ioa_reg;
- struct pqi_registers *pqi_reg;
- char *pci_mem_base_vaddr;
- PCI_ACC_HANDLE_T pci_mem_handle;
+struct pqisrc_softstate {
+ OS_SPECIFIC_T os_specific;
+ struct ioa_registers *ioa_reg;
+ struct pqi_registers *pqi_reg;
+ uint8_t *pci_mem_base_vaddr;
+ PCI_ACC_HANDLE_T pci_mem_handle;
struct pqi_cap pqi_cap;
struct pqi_pref_settings pref_settings;
char fw_version[11];
@@ -927,7 +1119,7 @@ typedef struct pqisrc_softstate {
uint16_t subvendid; /* sub vendor id */
uint16_t devid; /* device id */
uint16_t subsysid; /* sub system id */
- controller_state_t ctlr_state;
+ controller_state_t ctlr_state;
struct dma_mem err_buf_dma_mem;
struct dma_mem admin_queue_dma_mem;
struct dma_mem op_ibq_dma_mem;
@@ -955,7 +1147,7 @@ typedef struct pqisrc_softstate {
unsigned max_sg_per_iu;
uint8_t ib_spanning_supported : 1;
uint8_t ob_spanning_supported : 1;
- pqi_event_config_t event_config;
+ pqi_event_config_t event_config;
struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
int intr_type;
int intr_count;
@@ -967,19 +1159,18 @@ typedef struct pqisrc_softstate {
#else
lockless_stack_t taglist;
#endif /* LOCKFREE_STACK */
- boolean_t devlist_lockcreated;
+ boolean_t devlist_lockcreated;
OS_LOCK_T devlist_lock OS_ATTRIBUTE_ALIGNED(8);
char devlist_lock_name[LOCKNAME_SIZE];
pqi_scsi_dev_t *device_list[PQI_MAX_DEVICES][PQI_MAX_MULTILUN];
OS_SEMA_LOCK_T scan_lock;
uint8_t lun_count[PQI_MAX_DEVICES];
- uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS];
- OS_ATOMIC64_T num_intrs;
- uint64_t prev_num_intrs;
+ uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS];
uint64_t prev_heartbeat_count;
uint64_t *heartbeat_counter_abs_addr;
uint64_t heartbeat_counter_off;
- uint64_t num_heartbeats_requested;
+ uint8_t *fw_features_section_abs_addr;
+ uint64_t fw_features_section_off;
uint32_t bus_id;
uint32_t device_id;
uint32_t func_id;
@@ -987,7 +1178,21 @@ typedef struct pqisrc_softstate {
boolean_t ctrl_online;
uint8_t pqi_reset_quiesce_allowed : 1;
boolean_t ctrl_in_pqi_mode;
- tid_pool_t tid_pool;
-}pqisrc_softstate_t;
+ tid_pool_t tid_pool;
+ uint32_t adapterQDepth;
+ uint32_t dma_mem_consumed;
+ boolean_t timeout_in_passthrough;
+ boolean_t timeout_in_tmf;
+};
+
+typedef struct vpd_logical_volume_status {
+ uint8_t peripheral_info;
+ uint8_t page_code;
+ uint8_t reserved;
+ uint8_t page_length;
+ uint8_t volume_status;
+ uint8_t reserved2[3];
+ uint32_t flags;
+}vpd_volume_status;
#endif
diff --git a/sys/dev/smartpqi/smartpqi_tag.c b/sys/dev/smartpqi/smartpqi_tag.c
index 125ebf46b446..2346df89102a 100644
--- a/sys/dev/smartpqi/smartpqi_tag.c
+++ b/sys/dev/smartpqi/smartpqi_tag.c
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2018 Microsemi Corporation.
- * All rights reserved.
+ * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,11 +32,12 @@
/*
* Function used to release the tag from taglist.
*/
-void pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
+void
+pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
{
OS_ACQUIRE_SPINLOCK(&(taglist->lock));
- /*DBG_FUNC("IN\n");*/
+ DBG_FUNC("IN\n");
ASSERT(taglist->num_elem < taglist->max_elem);
@@ -49,17 +49,18 @@ void pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem)
OS_RELEASE_SPINLOCK(&taglist->lock);
- /*DBG_FUNC("OUT\n");*/
+ DBG_FUNC("OUT\n");
}
/*
* Function used to get an unoccupied tag from the tag list.
*/
-uint32_t pqisrc_get_tag(pqi_taglist_t *taglist)
+uint32_t
+pqisrc_get_tag(pqi_taglist_t *taglist)
{
uint32_t elem = INVALID_ELEM;
- /*DBG_FUNC("IN\n");*/
+/* DBG_FUNC("IN\n");*/
OS_ACQUIRE_SPINLOCK(&taglist->lock);
@@ -73,14 +74,15 @@ uint32_t pqisrc_get_tag(pqi_taglist_t *taglist)
OS_RELEASE_SPINLOCK(&taglist->lock);
- /*DBG_FUNC("OUT got %d\n", elem);*/
+/* DBG_FUNC("OUT got %d\n", elem);*/
return elem;
}
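pqisrc_put_tag and pqisrc_get_tag treat elem_array as a fixed-size ring guarded by the spinlock; a standalone sketch of that ring discipline (illustrative only, locking elided):

#include <stdint.h>

/* Sketch: the ring-buffer discipline behind the spinlock tag list.
 * The real code wraps these bodies in OS_ACQUIRE/RELEASE_SPINLOCK. */
typedef struct {
	uint32_t *elem_array;
	uint32_t max_elem, num_elem, head, tail;
} ring_t;

static void
ring_put(ring_t *r, uint32_t elem)
{
	r->elem_array[r->tail] = elem;		/* write at tail */
	r->tail = (r->tail + 1) % r->max_elem;
	r->num_elem++;
}

static uint32_t
ring_get(ring_t *r)
{
	uint32_t elem;

	if (r->num_elem == 0)
		return (0);			/* INVALID_ELEM in the driver */
	elem = r->elem_array[r->head];		/* read at head */
	r->head = (r->head + 1) % r->max_elem;
	r->num_elem--;
	return (elem);
}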
/*
* Initialize circular queue implementation of tag list.
*/
-int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
+int
+pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
uint32_t max_elem)
{
int ret = PQI_STATUS_SUCCESS;
@@ -93,22 +95,22 @@ int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
taglist->head = 0;
taglist->tail = 0;
taglist->elem_array = os_mem_alloc(softs,
- (max_elem * sizeof(uint32_t)));
+ (max_elem * sizeof(uint32_t)));
if (!(taglist->elem_array)) {
DBG_FUNC("Unable to allocate memory for taglist\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
}
- os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
- ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
- if(ret){
- DBG_ERR("tag lock initialization failed\n");
- taglist->lockcreated=false;
- goto err_lock;
+ os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE);
+ ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname);
+ if(ret){
+ DBG_ERR("tag lock initialization failed\n");
+ taglist->lockcreated=false;
+ goto err_lock;
}
- taglist->lockcreated = true;
-
+ taglist->lockcreated = true;
+
/* indices 1 to max_elem are considered as valid tags */
for (i=1; i <= max_elem; i++) {
softs->rcb[i].tag = INVALID_ELEM;
@@ -119,8 +121,8 @@ int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist,
return ret;
err_lock:
- os_mem_free(softs, (char *)taglist->elem_array,
- (taglist->max_elem * sizeof(uint32_t)));
+ os_mem_free(softs, (char *)taglist->elem_array,
+ (taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
err_out:
DBG_FUNC("OUT failed\n");
@@ -130,27 +132,29 @@ err_out:
/*
* Destroy circular queue implementation of tag list.
*/
-void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
+void
+pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist)
{
DBG_FUNC("IN\n");
- os_mem_free(softs, (char *)taglist->elem_array,
+ os_mem_free(softs, (char *)taglist->elem_array,
(taglist->max_elem * sizeof(uint32_t)));
taglist->elem_array = NULL;
-
- if(taglist->lockcreated==true){
- os_uninit_spinlock(&taglist->lock);
- taglist->lockcreated = false;
- }
-
+
+ if(taglist->lockcreated==true){
+ os_uninit_spinlock(&taglist->lock);
+ taglist->lockcreated = false;
+ }
+
DBG_FUNC("OUT\n");
}
-#else /* LOCKFREE_STACK */
+#else /* LOCKFREE_STACK */
/*
* Initialize lock-free stack implementation of tag list.
*/
-int pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
+int
+pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
uint32_t max_elem)
{
int ret = PQI_STATUS_SUCCESS;
@@ -159,21 +163,21 @@ int pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack,
DBG_FUNC("IN\n");
/* indices 1 to max_elem are considered as valid tags */
- stack->num_elements = max_elem + 1;
- stack->head.data = 0;
+ stack->max_elem = max_elem + 1;
+ stack->head.data = 0;
DBG_INFO("Stack head address :%p\n",&stack->head);
/*Allocate memory for stack*/
stack->next_index_array = (uint32_t*)os_mem_alloc(softs,
- (stack->num_elements * sizeof(uint32_t)));
+ (stack->max_elem * sizeof(uint32_t)));
if (!(stack->next_index_array)) {
DBG_ERR("Unable to allocate memory for stack\n");
ret = PQI_STATUS_FAILURE;
goto err_out;
- }
+ }
/* push all the entries to the stack */
- for (index = 1; index < stack->num_elements ; index++) {
+ for (index = 1; index < stack->max_elem ; index++) {
softs->rcb[index].tag = INVALID_ELEM;
pqisrc_put_tag(stack, index);
}
@@ -188,14 +192,15 @@ err_out:
/*
* Destroy lock-free stack implementation of tag list.
*/
-void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
+void
+pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
{
DBG_FUNC("IN\n");
/* de-allocate stack memory */
if (stack->next_index_array) {
os_mem_free(softs,(char*)stack->next_index_array,
- (stack->num_elements * sizeof(uint32_t)));
+ (stack->max_elem * sizeof(uint32_t)));
stack->next_index_array = NULL;
}
@@ -205,22 +210,23 @@ void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack)
/*
* Function used to release the tag from taglist.
*/
-void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
+void
+pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
{
- union head_list cur_head, new_head;
+ union head_list cur_head, new_head;
DBG_FUNC("IN\n");
DBG_INFO("push tag :%d\n",index);
- if ( index >= stack->num_elements ) {
+ if (index >= stack->max_elem) {
ASSERT(false);
- DBG_ERR("Pushed Invalid index\n"); /* stack full */
+		DBG_INFO("Pushed invalid index\n"); /* stack full */
return;
}
- if ( stack->next_index_array[index] != 0) {
+ if (stack->next_index_array[index] != 0) {
ASSERT(false);
- DBG_ERR("Index already present as tag in the stack\n");
+ DBG_INFO("Index already present as tag in the stack\n");
return;
}
@@ -232,8 +238,8 @@ void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
new_head.top.index = index;
/* Create a link to the previous index */
stack->next_index_array[index] = cur_head.top.index;
- }while(OS_ATOMIC64_CAS(&stack->head.data,cur_head.data,new_head.data)
- != cur_head.data);
+ }while(!os_atomic64_cas(&stack->head.data,cur_head.data,new_head.data));
+ stack->num_elem++;
DBG_FUNC("OUT\n");
return;
}
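The loop above packs a 32-bit top index and a 32-bit sequence number into one 64-bit head word, so a single CAS both swaps the top and defeats ABA reuse of a popped-and-repushed index. A self-contained C11 sketch of the push side, with stdatomic standing in for the driver's os_atomic64_cas wrapper:

#include <stdatomic.h>
#include <stdint.h>

/* Sketch: ABA-safe push using an {index, seq_no} pair in one 64-bit word,
 * mirroring the head_list union; C11 atomics replace os_atomic64_cas. */
struct lf_stack {
	_Atomic uint64_t head;		/* low 32: top index, high 32: seq_no */
	uint32_t next_index_array[];	/* next_index_array[i] links i downward */
};

static void
lf_stack_push(struct lf_stack *s, uint32_t index)
{
	uint64_t old, upd;

	do {
		old = atomic_load(&s->head);
		/* link the new top to the previous top */
		s->next_index_array[index] = (uint32_t)old;
		/* new head = pushed index plus a bumped sequence number;
		 * the seq bump is what makes a stale CAS fail */
		upd = (((old >> 32) + 1) << 32) | index;
	} while (!atomic_compare_exchange_weak(&s->head, &old, upd));
}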
@@ -241,7 +247,8 @@ void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index)
/*
* Function used to get an unoccupied tag from the tag list.
*/
-uint32_t pqisrc_get_tag(lockless_stack_t *stack)
+uint32_t
+pqisrc_get_tag(lockless_stack_t *stack)
{
union head_list cur_head, new_head;
@@ -254,9 +261,9 @@ uint32_t pqisrc_get_tag(lockless_stack_t *stack)
new_head.top.seq_no = cur_head.top.seq_no + 1;
/* update the index at the top of the stack with the next index */
new_head.top.index = stack->next_index_array[cur_head.top.index];
- }while(OS_ATOMIC64_CAS(&stack->head.data,cur_head.data,new_head.data)
- != cur_head.data);
+ }while(!os_atomic64_cas(&stack->head.data,cur_head.data,new_head.data));
stack->next_index_array[cur_head.top.index] = 0;
+ stack->num_elem--;
DBG_INFO("pop tag: %d\n",cur_head.top.index);
DBG_FUNC("OUT\n");