Lines Matching refs:srb
181 * srb->segment_x is the hw sg list. It is always allocated as a
228 struct list_head list; /* next/prev ptrs for srb lists */
244 * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
271 struct list_head srb_going_list; /* head of going srb list */
272 struct list_head srb_waiting_list; /* head of waiting srb list */
305 struct list_head srb_free_list; /* head of free srb list */
334 struct ScsiReqBlk srb;
343 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
345 static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
347 static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
349 static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
351 static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
353 static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
355 static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
357 static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
359 static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
361 static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
363 static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
365 static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
367 static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
369 static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
373 struct ScsiReqBlk *srb);
376 struct ScsiReqBlk *srb, u16 io_dir);
380 struct ScsiReqBlk *srb);
382 struct ScsiReqBlk *srb);
384 struct ScsiReqBlk *srb);
388 static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
390 struct ScsiReqBlk *srb);
392 struct ScsiReqBlk *srb);
394 struct ScsiReqBlk *srb);
735 static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
737 if (srb->tag_number < 255) {
738 dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
739 srb->tag_number = 255;
775 struct ScsiReqBlk *srb;
821 srb = list_entry(waiting_list_head->next,
825 if (!start_scsi(acb, pos, srb))
826 list_move(&srb->list, &pos->srb_going_list);
855 /* Send SCSI Request Block (srb) to adapter (acb) */
856 static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
858 struct DeviceCtlBlk *dcb = srb->dcb;
863 list_add_tail(&srb->list, &dcb->srb_waiting_list);
868 if (!start_scsi(acb, dcb, srb)) {
869 list_add_tail(&srb->list, &dcb->srb_going_list);
871 list_add(&srb->list, &dcb->srb_waiting_list);
878 struct ScsiReqBlk *srb)
885 srb->dcb = dcb;
886 srb->cmd = cmd;
887 srb->sg_count = 0;
888 srb->total_xfer_length = 0;
889 srb->sg_bus_addr = 0;
890 srb->sg_index = 0;
891 srb->adapter_status = 0;
892 srb->target_status = 0;
893 srb->msg_count = 0;
894 srb->status = 0;
895 srb->flag = 0;
896 srb->state = 0;
897 srb->retry_count = 0;
898 srb->tag_number = TAG_NONE;
899 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
900 srb->end_message = 0;
909 srb->segment_x[0].address);
914 struct SGentry *sgp = srb->segment_x;
916 srb->sg_count = nseg;
921 srb->sg_count);
923 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
928 srb->total_xfer_length += seglen;
930 sgp += srb->sg_count - 1;
936 if (srb->total_xfer_length > reqlen) {
937 sgp->length -= (srb->total_xfer_length - reqlen);
938 srb->total_xfer_length = reqlen;
943 srb->total_xfer_length % 2) {
944 srb->total_xfer_length++;
948 srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
949 srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
952 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
955 srb->request_length = srb->total_xfer_length;
981 struct ScsiReqBlk *srb;
1017 srb = list_first_entry_or_null(&acb->srb_free_list,
1019 if (!srb) {
1024 dprintkdbg(DBG_0, "queue_command: No free srb's\n");
1027 list_del(&srb->list);
1029 build_srb(cmd, dcb, srb);
1033 list_add_tail(&srb->list, &dcb->srb_waiting_list);
1037 send_srb(acb, srb);
1056 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1063 if (!srb && dcb)
1064 srb = dcb->active_srb;
1065 if (srb) {
1066 if (!srb->cmd)
1067 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1068 srb, srb->cmd);
1070 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1072 srb, srb->cmd,
1073 srb->cmd->cmnd[0], srb->cmd->device->id,
1074 (u8)srb->cmd->device->lun);
1076 srb->segment_x, srb->sg_count, srb->sg_index,
1077 srb->total_xfer_length);
1079 srb->state, srb->status, srb->scsi_phase,
1234 struct ScsiReqBlk *srb;
1244 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1245 if (srb) {
1246 list_del(&srb->list);
1247 pci_unmap_srb_sense(acb, srb);
1248 pci_unmap_srb(acb, srb);
1249 free_tag(dcb, srb);
1250 list_add_tail(&srb->list, &acb->srb_free_list);
1255 srb = find_cmd(cmd, &dcb->srb_going_list);
1256 if (srb) {
1268 struct ScsiReqBlk *srb)
1270 u8 *ptr = srb->msgout_buf + srb->msg_count;
1271 if (srb->msg_count > 1) {
1274 srb->msg_count, srb->msgout_buf[0],
1275 srb->msgout_buf[1]);
1289 srb->msg_count += 5;
1290 srb->state |= SRB_DO_SYNC_NEGO;
1296 struct ScsiReqBlk *srb)
1300 u8 *ptr = srb->msgout_buf + srb->msg_count;
1301 if (srb->msg_count > 1) {
1304 srb->msg_count, srb->msgout_buf[0],
1305 srb->msgout_buf[1]);
1312 srb->msg_count += 4;
1313 srb->state |= SRB_DO_WIDE_NEGO;
1342 struct ScsiReqBlk *srb;
1349 srb = acb->active_dcb->active_srb;
1357 struct ScsiReqBlk* srb)
1362 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
1363 dcb->target_id, dcb->target_lun, srb);
1365 srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
1391 srb->cmd,
1397 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1413 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
1418 if (srb->flag & AUTO_REQSENSE)
1421 if (((srb->cmd->cmnd[0] == INQUIRY)
1422 || (srb->cmd->cmnd[0] == REQUEST_SENSE)
1423 || (srb->flag & AUTO_REQSENSE))
1429 srb->msgout_buf[0] = identify_message;
1430 srb->msg_count = 1;
1432 srb->state = SRB_MSGOUT;
1436 build_wdtr(acb, dcb, srb);
1442 build_sdtr(acb, dcb, srb);
1447 build_wdtr(acb, dcb, srb);
1450 srb->msg_count = 0;
1456 srb->state = SRB_START_;
1471 srb->cmd, srb->cmd->device->id,
1472 (u8)srb->cmd->device->lun);
1473 srb->state = SRB_READY;
1482 srb->tag_number = tag_number;
1484 srb->state = SRB_START_;
1490 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
1491 srb->cmd->cmnd[0], srb->tag_number);
1492 if (srb->flag & AUTO_REQSENSE) {
1500 ptr = (u8 *)srb->cmd->cmnd;
1501 for (i = 0; i < srb->cmd->cmd_len; i++)
1514 srb->cmd, dcb->target_id, dcb->target_lun);
1515 srb->state = SRB_READY;
1516 free_tag(dcb, srb);
1517 srb->msg_count = 0;
1525 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
1526 dcb->active_srb = srb;
1541 srb->state |= SRB_MSGOUT
1546 struct ScsiReqBlk *srb)
1548 srb->msgout_buf[0] = ABORT;
1549 srb->msg_count = 1;
1551 srb->state &= ~SRB_MSGIN;
1552 srb->state |= SRB_MSGOUT;
1567 struct ScsiReqBlk *srb;
1614 srb = dcb->active_srb;
1617 enable_msgout_abort(acb, srb);
1621 phase = (u16)srb->scsi_phase;
1637 dc395x_statev(acb, srb, &scsi_status);
1644 srb->scsi_phase = scsi_status & PHASEMASK;
1660 dc395x_statev(acb, srb, &scsi_status);
1706 static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1709 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1710 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1714 srb->state &= ~SRB_MSGOUT;
1718 static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1723 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1726 if (!(srb->state & SRB_MSGOUT)) {
1727 srb->state |= SRB_MSGOUT;
1730 srb->cmd); /* So what ? */
1732 if (!srb->msg_count) {
1734 srb->cmd);
1740 ptr = (u8 *)srb->msgout_buf;
1741 for (i = 0; i < srb->msg_count; i++)
1743 srb->msg_count = 0;
1744 if (srb->msgout_buf[0] == MSG_ABORT)
1745 srb->state = SRB_ABORT_SENT;
1751 static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1754 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1759 static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1765 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1769 if (!(srb->flag & AUTO_REQSENSE)) {
1770 ptr = (u8 *)srb->cmd->cmnd;
1771 for (i = 0; i < srb->cmd->cmd_len; i++) {
1785 srb->state |= SRB_COMMAND;
1795 * the count of remaining bytes in srb->total_xfer_length
1797 static void sg_verify_length(struct ScsiReqBlk *srb)
1801 unsigned idx = srb->sg_index;
1802 struct SGentry *psge = srb->segment_x + idx;
1803 for (; idx < srb->sg_count; psge++, idx++)
1805 if (len != srb->total_xfer_length)
1808 srb->total_xfer_length, len);
1817 static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
1820 u32 xferred = srb->total_xfer_length - left; /* bytes transferred */
1821 struct SGentry *psge = srb->segment_x + srb->sg_index;
1825 xferred, srb->total_xfer_length, left);
1831 sg_verify_length(srb);
1832 srb->total_xfer_length = left; /* update remaining count */
1833 for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
1839 dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
1840 srb->sg_bus_addr, SEGMENTX_LEN,
1844 srb->sg_index = idx;
1845 dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
1846 srb->sg_bus_addr, SEGMENTX_LEN,
1852 sg_verify_length(srb);
1862 static void sg_subtract_one(struct ScsiReqBlk *srb)
1864 sg_update_list(srb, srb->total_xfer_length - 1);
1877 struct ScsiReqBlk *srb)
1902 static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1905 struct DeviceCtlBlk *dcb = srb->dcb;
1909 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
1930 srb->total_xfer_length);
1933 if (!(srb->state & SRB_XFERPAD)) {
1935 srb->status |= PARITY_ERROR;
1974 if (srb->total_xfer_length > DC395x_LASTPIO)
1982 && scsi_bufflen(srb->cmd) % 2) {
1999 srb->total_xfer_length = 0;
2007 srb->total_xfer_length - d_left_counter;
2010 sg_update_list(srb, d_left_counter);
2012 if ((srb->segment_x[srb->sg_index].length ==
2013 diff && scsi_sg_count(srb->cmd))
2020 srb->total_xfer_length - diff;
2021 sg_update_list(srb, d_left_counter);
2022 /*srb->total_xfer_length -= diff; */
2023 /*srb->virt_addr += diff; */
2024 /*if (srb->cmd->use_sg) */
2025 /* srb->sg_index++; */
2030 cleanup_after_transfer(acb, srb);
2035 static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2039 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2042 data_io_transfer(acb, srb, XFERDATAOUT);
2045 static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2051 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2066 if (!(srb->state & SRB_XFERPAD)) {
2072 "Parity Error\n", srb->cmd);
2073 srb->status |= PARITY_ERROR;
2109 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2116 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2121 srb->total_xfer_length, d_left_counter);
2125 && srb->total_xfer_length <= DC395x_LASTPIO) {
2126 size_t left_io = srb->total_xfer_length;
2128 /*u32 addr = (srb->segment_x[srb->sg_index].address); */
2129 /*sg_update_list (srb, d_left_counter); */
2133 (srb->dcb->sync_period & WIDE_SYNC) ?
2135 srb->total_xfer_length);
2136 if (srb->dcb->sync_period & WIDE_SYNC)
2143 size_t offset = srb->request_length - left_io;
2148 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2149 srb->sg_count, &offset, &len);
2163 sg_subtract_one(srb);
2177 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2179 if (srb->total_xfer_length > 0) {
2183 srb->total_xfer_length--;
2195 /*srb->total_xfer_length = 0; */
2214 if (srb->dcb->sync_period & WIDE_SYNC)
2236 srb->total_xfer_length = 0;
2238 srb->total_xfer_length = d_left_counter;
2248 sg_update_list(srb, d_left_counter);
2253 cleanup_after_transfer(acb, srb);
2258 static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2262 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2263 data_io_transfer(acb, srb, XFERDATAIN);
2268 struct ScsiReqBlk *srb, u16 io_dir)
2270 struct DeviceCtlBlk *dcb = srb->dcb;
2274 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
2276 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2277 if (srb == acb->tmp_srb)
2279 if (srb->sg_index >= srb->sg_count) {
2284 if (srb->total_xfer_length > DC395x_LASTPIO) {
2293 dump_register_info(acb, dcb, srb);
2301 srb->state |= SRB_DATA_XFER;
2303 if (scsi_sg_count(srb->cmd)) { /* with S/G */
2306 srb->sg_bus_addr +
2308 srb->sg_index);
2311 ((u32)(srb->sg_count -
2312 srb->sg_index) << 3));
2316 srb->segment_x[0].address);
2318 srb->segment_x[0].length);
2322 srb->total_xfer_length);
2336 else if (srb->total_xfer_length > 0) { /* The last four bytes: Do PIO */
2341 srb->state |= SRB_DATA_XFER;
2344 srb->total_xfer_length);
2350 int ln = srb->total_xfer_length;
2351 size_t left_io = srb->total_xfer_length;
2353 if (srb->dcb->sync_period & WIDE_SYNC)
2361 size_t offset = srb->request_length - left_io;
2365 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2366 srb->sg_count, &offset, &len);
2377 sg_subtract_one(srb);
2383 if (srb->dcb->sync_period & WIDE_SYNC) {
2401 if (srb->sg_count) {
2402 srb->adapter_status = H_OVER_UNDER_RUN;
2403 srb->status |= OVER_RUN;
2434 srb->state |= SRB_XFERPAD;
2443 static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2447 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2448 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2449 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
2450 srb->state = SRB_COMPLETED;
2457 static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2461 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2462 srb->state = SRB_STATUS;
2484 struct ScsiReqBlk *srb)
2486 srb->msgout_buf[0] = MESSAGE_REJECT;
2487 srb->msg_count = 1;
2489 srb->state &= ~SRB_MSGIN;
2490 srb->state |= SRB_MSGOUT;
2492 srb->msgin_buf[0],
2493 srb->dcb->target_id, srb->dcb->target_lun);
2500 struct ScsiReqBlk *srb = NULL;
2502 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
2503 srb->cmd, tag, srb);
2514 srb = i;
2518 if (!srb)
2522 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2524 /*srb->state = SRB_ABORT_SENT; */
2525 enable_msgout_abort(acb, srb);
2528 if (!(srb->state & SRB_DISCONNECT))
2531 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2532 srb->state |= dcb->active_srb->state;
2533 srb->state |= SRB_DATA_XFER;
2534 dcb->active_srb = srb;
2536 return srb;
2539 srb = acb->tmp_srb;
2540 srb->state = SRB_UNEXPECT_RESEL;
2541 dcb->active_srb = srb;
2542 srb->msgout_buf[0] = MSG_ABORT_TAG;
2543 srb->msg_count = 1;
2546 return srb;
2561 static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2563 struct DeviceCtlBlk *dcb = srb->dcb;
2572 srb->state &= ~SRB_DO_SYNC_NEGO;
2576 build_wdtr(acb, dcb, srb);
2584 static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2586 struct DeviceCtlBlk *dcb = srb->dcb;
2591 dcb->target_id, srb->msgin_buf[3] << 2,
2592 (250 / srb->msgin_buf[3]),
2593 ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
2594 srb->msgin_buf[4]);
2596 if (srb->msgin_buf[4] > 15)
2597 srb->msgin_buf[4] = 15;
2601 dcb->sync_offset = srb->msgin_buf[4];
2602 if (srb->msgin_buf[4] > dcb->sync_offset)
2603 srb->msgin_buf[4] = dcb->sync_offset;
2605 dcb->sync_offset = srb->msgin_buf[4];
2607 while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
2611 if (srb->msgin_buf[3] < clock_period[bval])
2615 srb->msgin_buf[3] = clock_period[bval];
2618 dcb->min_nego_period = srb->msgin_buf[3];
2633 if (!(srb->state & SRB_DO_SYNC_NEGO)) {
2636 srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
2638 memcpy(srb->msgout_buf, srb->msgin_buf, 5);
2639 srb->msg_count = 5;
2645 build_wdtr(acb, dcb, srb);
2650 srb->state &= ~SRB_DO_SYNC_NEGO;
2658 struct ScsiReqBlk *srb)
2660 struct DeviceCtlBlk *dcb = srb->dcb;
2666 srb->state &= ~SRB_DO_WIDE_NEGO;
2670 build_sdtr(acb, dcb, srb);
2676 static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2678 struct DeviceCtlBlk *dcb = srb->dcb;
2683 if (srb->msgin_buf[3] > wide)
2684 srb->msgin_buf[3] = wide;
2686 if (!(srb->state & SRB_DO_WIDE_NEGO)) {
2690 memcpy(srb->msgout_buf, srb->msgin_buf, 4);
2691 srb->msg_count = 4;
2692 srb->state |= SRB_DO_WIDE_NEGO;
2697 if (srb->msgin_buf[3] > 0)
2701 srb->state &= ~SRB_DO_WIDE_NEGO;
2705 (8 << srb->msgin_buf[3]), dcb->target_id);
2709 build_sdtr(acb, dcb, srb);
2728 static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2732 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2734 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2735 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
2737 switch (srb->msgin_buf[0]) {
2739 srb->state = SRB_DISCONNECT;
2745 srb =
2747 srb->msgin_buf[1]);
2754 if (srb->state & SRB_DO_SYNC_NEGO) {
2755 msgin_set_async(acb, srb);
2759 if (srb->state & SRB_DO_WIDE_NEGO) {
2760 msgin_set_nowide(acb, srb);
2763 enable_msgout_abort(acb, srb);
2764 /*srb->state |= SRB_ABORT_SENT */
2769 if (srb->msgin_buf[1] == 3
2770 && srb->msgin_buf[2] == EXTENDED_SDTR) {
2771 msgin_set_sync(acb, srb);
2775 if (srb->msgin_buf[1] == 2
2776 && srb->msgin_buf[2] == EXTENDED_WDTR
2777 && srb->msgin_buf[3] <= 2) { /* sanity check ... */
2778 msgin_set_wide(acb, srb);
2781 msgin_reject(acb, srb);
2800 srb->cmd, srb->total_xfer_length);
2810 srb->cmd, dcb->target_id,
2813 enable_msgout_abort(acb, srb);
2818 if (srb->msgin_buf[0] & IDENTIFY_BASE) {
2820 srb->msg_count = 1;
2821 srb->msgout_buf[0] = dcb->identify_msg;
2823 srb->state |= SRB_MSGOUT;
2826 msgin_reject(acb, srb);
2830 srb->state &= ~SRB_MSGIN;
2839 static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2842 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2845 if (!(srb->state & SRB_MSGIN)) {
2846 srb->state &= ~SRB_DISCONNECT;
2847 srb->state |= SRB_MSGIN;
2855 static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2861 static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2893 struct ScsiReqBlk *srb;
2906 srb = dcb->active_srb;
2908 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
2910 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
2913 if (srb->state & SRB_UNEXPECT_RESEL) {
2917 srb->state = 0;
2919 } else if (srb->state & SRB_ABORT_SENT) {
2923 doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
2926 if ((srb->state & (SRB_START_ + SRB_MSGOUT))
2927 || !(srb->
2934 if (srb->state != SRB_START_
2935 && srb->state != SRB_MSGOUT) {
2936 srb->state = SRB_READY;
2939 srb->cmd);
2940 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
2945 "<%02i-%i> SelTO\n", srb->cmd,
2947 if (srb->retry_count++ > DC395x_MAX_RETRIES
2949 srb->target_status =
2953 free_tag(dcb, srb);
2954 list_move(&srb->list, &dcb->srb_waiting_list);
2957 srb->cmd);
2960 } else if (srb->state & SRB_DISCONNECT) {
2972 } else if (srb->state & SRB_COMPLETED) {
2977 free_tag(dcb, srb);
2979 srb->state = SRB_FREE;
2980 srb_done(acb, dcb, srb);
2989 struct ScsiReqBlk *srb = NULL;
3000 srb = dcb->active_srb;
3001 if (!srb) {
3011 srb->cmd, dcb->target_id,
3015 /*srb->state |= SRB_DISCONNECT; */
3017 srb->state = SRB_READY;
3018 free_tag(dcb, srb);
3019 list_move(&srb->list, &dcb->srb_waiting_list);
3046 srb = acb->tmp_srb;
3047 dcb->active_srb = srb;
3050 srb = dcb->active_srb;
3051 if (!srb || !(srb->state & SRB_DISCONNECT)) {
3058 srb = acb->tmp_srb;
3059 srb->state = SRB_UNEXPECT_RESEL;
3060 dcb->active_srb = srb;
3061 enable_msgout_abort(acb, srb);
3064 /*srb->state = SRB_ABORT_SENT; */
3065 enable_msgout_abort(acb, srb);
3067 srb->state = SRB_DATA_XFER;
3071 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
3133 static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3135 struct scsi_cmnd *cmd = srb->cmd;
3141 srb->sg_bus_addr, SEGMENTX_LEN);
3142 dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
3154 struct ScsiReqBlk *srb)
3156 if (!(srb->flag & AUTO_REQSENSE))
3160 srb->segment_x[0].address);
3161 dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
3162 srb->segment_x[0].length, DMA_FROM_DEVICE);
3164 srb->total_xfer_length = srb->xferred;
3165 srb->segment_x[0].address =
3166 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
3167 srb->segment_x[0].length =
3168 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
3177 struct ScsiReqBlk *srb)
3180 struct scsi_cmnd *cmd = srb->cmd;
3184 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3185 srb->cmd->device->id, (u8)srb->cmd->device->lun);
3186 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3187 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
3189 status = srb->target_status;
3190 if (srb->flag & AUTO_REQSENSE) {
3192 pci_unmap_srb_sense(acb, srb);
3196 srb->flag &= ~AUTO_REQSENSE;
3197 srb->adapter_status = 0;
3198 srb->target_status = CHECK_CONDITION << 1;
3251 if (srb->total_xfer_length
3252 && srb->total_xfer_length >= cmd->underflow)
3255 srb->end_message, CHECK_CONDITION);
3260 srb->end_message, CHECK_CONDITION);
3271 request_sense(acb, dcb, srb);
3280 free_tag(dcb, srb);
3281 list_move(&srb->list, &dcb->srb_waiting_list);
3283 srb->adapter_status = 0;
3284 srb->target_status = 0;
3287 srb->adapter_status = H_SEL_TIMEOUT;
3288 srb->target_status = 0;
3291 srb->adapter_status = 0;
3293 SET_RES_MSG(cmd->result, srb->end_message);
3301 status = srb->adapter_status;
3303 srb->target_status = 0;
3305 SET_RES_MSG(cmd->result, srb->end_message);
3306 } else if (srb->status & PARITY_ERROR) {
3308 SET_RES_MSG(cmd->result, srb->end_message);
3311 srb->adapter_status = 0;
3312 srb->target_status = 0;
3321 pci_unmap_srb(acb, srb);
3339 /*if( srb->cmd->cmnd[0] == INQUIRY && */
3354 scsi_set_resid(cmd, srb->total_xfer_length);
3356 cmd->SCp.this_residual = srb->total_xfer_length;
3359 if (srb->total_xfer_length)
3363 cmd->cmnd[0], srb->total_xfer_length);
3366 if (srb != acb->tmp_srb) {
3370 list_move_tail(&srb->list, &acb->srb_free_list);
3388 struct ScsiReqBlk *srb;
3392 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3396 p = srb->cmd;
3401 list_del(&srb->list);
3402 free_tag(dcb, srb);
3403 list_add_tail(&srb->list, &acb->srb_free_list);
3405 pci_unmap_srb_sense(acb, srb);
3406 pci_unmap_srb(acb, srb);
3424 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3426 p = srb->cmd;
3431 list_move_tail(&srb->list, &acb->srb_free_list);
3433 pci_unmap_srb_sense(acb, srb);
3434 pci_unmap_srb(acb, srb);
3534 struct ScsiReqBlk *srb)
3536 struct scsi_cmnd *cmd = srb->cmd;
3540 srb->flag |= AUTO_REQSENSE;
3541 srb->adapter_status = 0;
3542 srb->target_status = 0;
3548 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
3549 srb->segment_x[0].address;
3550 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
3551 srb->segment_x[0].length;
3552 srb->xferred = srb->total_xfer_length;
3553 /* srb->segment_x : a one entry of S/G list table */
3554 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3555 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3557 srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
3561 cmd->sense_buffer, srb->segment_x[0].address,
3563 srb->sg_count = 1;
3564 srb->sg_index = 0;
3566 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */
3569 srb->cmd, dcb->target_id, dcb->target_lun);
3570 list_move(&srb->list, &dcb->srb_waiting_list);
4158 acb->srb.segment_x =
4235 acb->tmp_srb = &acb->srb;
4264 /* link static array of srbs into the srb free list */
4410 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4559 struct ScsiReqBlk *srb;
4564 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
4565 seq_printf(m, " %p", srb->cmd);
4570 list_for_each_entry(srb, &dcb->srb_going_list, list)
4571 seq_printf(m, " %p", srb->cmd);