Lines Matching defs:hrrq (drivers/scsi/ipr.c, the IBM Power Linux RAID "ipr" SCSI driver; numbers below are ipr.c source lines)
691 * @hrrq: hrr queue
697 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
701 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
702 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
722 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
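The matches at 697-702 outline the free-list pop that backs every command allocation; the leading underscores conventionally mean the caller already holds the queue lock. A minimal reconstruction from the fragments above (the list_del() line contains no "hrrq" and so is absent from the listing, but the pattern requires it):

    struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
    {
    	struct ipr_cmnd *ipr_cmd = NULL;

    	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
    		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
    			struct ipr_cmnd, queue);
    		list_del(&ipr_cmd->queue);	/* assumed: unlink before handing out */
    	}

    	return ipr_cmd;
    }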
745 spin_lock(&ioa_cfg->hrrq[i]._lock);
746 ioa_cfg->hrrq[i].allow_interrupts = 0;
747 spin_unlock(&ioa_cfg->hrrq[i]._lock);
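Lines 745-747 (and the near-identical runs at 3691-3693, 7252-7254, 8454-8456, 9272-9274, 9324-9326, 9366-9368, 9460-9462, 10494-10496) all follow one idiom: walk every queue and flip a state flag under that queue's private _lock. A sketch of the shape, with the surrounding loop assumed:

    for (i = 0; i < ioa_cfg->hrrq_num; i++) {
    	spin_lock(&ioa_cfg->hrrq[i]._lock);
    	ioa_cfg->hrrq[i].allow_interrupts = 0;	/* or allow_cmds, ioa_is_dead, removing_ioa */
    	spin_unlock(&ioa_cfg->hrrq[i]._lock);
    }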
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
844 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
847 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
849 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
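The fragments at 844-849 (repeated at 888-892, 6087-6092, and 6174-6179) look like thin wrappers that take the command's own queue lock around a completion callback; reconstructed under that assumption:

    static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
    {
    	unsigned long hrrq_flags;
    	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

    	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
    	ipr_cmd->done(ipr_cmd);	/* assumed callback; only the locking appears in the listing */
    	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
    }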
872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
888 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
890 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
892 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
907 struct ipr_hrr_queue *hrrq;
910 for_each_hrrq(hrrq, ioa_cfg) {
911 spin_lock(&hrrq->_lock);
913 temp, &hrrq->hrrq_pending_q, queue) {
931 spin_unlock(&hrrq->_lock);
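Lines 907-931 show the standard teardown walk: for_each_hrrq() visits each queue and a _safe list iterator drains hrrq_pending_q so entries can be moved mid-iteration. for_each_hrrq is presumably a simple pointer-range macro over ioa_cfg->hrrq[0..hrrq_num); a sketch:

    #define for_each_hrrq(hrrq, ioa_cfg) \
    	for (hrrq = (ioa_cfg)->hrrq; \
    	     hrrq < (ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num; hrrq++)

    for_each_hrrq(hrrq, ioa_cfg) {
    	spin_lock(&hrrq->_lock);
    	list_for_each_entry_safe(ipr_cmd, temp, &hrrq->hrrq_pending_q, queue) {
    		list_del(&ipr_cmd->queue);
    		/* fail or complete the op, then park it on hrrq_free_q */
    	}
    	spin_unlock(&hrrq->_lock);
    }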
982 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1084 unsigned int hrrq;
1087 hrrq = 0;
1089 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1090 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1092 return hrrq;
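Lines 1084-1092 are the queue-selection helper. Queue 0 (IPR_INIT_HRRQ) is reserved for initialization and internal commands, so when more than one queue exists an atomic counter round-robins over queues 1..hrrq_num-1. Reconstructed, with the single-queue guard assumed:

    static unsigned int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
    {
    	unsigned int hrrq;

    	if (ioa_cfg->hrrq_num == 1)
    		hrrq = 0;
    	else {
    		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
    		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;	/* skip queue 0 */
    	}
    	return hrrq;
    }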
1114 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1116 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1504 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
3340 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3655 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3688 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3691 spin_lock(&ioa_cfg->hrrq[i]._lock);
3692 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3693 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3820 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3827 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
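Lines 3820/3827 (and 10627/10658) pair irq_poll_init() with irq_poll_disable() when the polling weight is reconfigured at runtime. irq_poll_init() takes a weight and a poll callback; a sketch, with the weight field name assumed:

    irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
    	      ioa_cfg->iopoll_weight,	/* assumed field name */
    	      ipr_iopoll);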
4403 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
5087 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5127 struct ipr_hrr_queue *hrrq;
5135 for_each_hrrq(hrrq, ioa_cfg) {
5136 spin_lock_irqsave(hrrq->lock, flags);
5137 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5146 spin_unlock_irqrestore(hrrq->lock, flags);
5155 for_each_hrrq(hrrq, ioa_cfg) {
5156 spin_lock_irqsave(hrrq->lock, flags);
5157 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5166 spin_unlock_irqrestore(hrrq->lock, flags);
5191 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5206 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5261 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5342 struct ipr_hrr_queue *hrrq;
5355 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5358 for_each_hrrq(hrrq, ioa_cfg) {
5359 spin_lock(&hrrq->_lock);
5360 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5376 spin_unlock(&hrrq->_lock);
5453 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5515 struct ipr_hrr_queue *hrrq;
5526 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5541 for_each_hrrq(hrrq, ioa_cfg) {
5542 spin_lock(&hrrq->_lock);
5543 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5551 spin_unlock(&hrrq->_lock);
5578 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5601 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5783 struct ipr_hrr_queue *hrrq;
5789 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5791 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5792 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5796 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
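Lines 5783-5796 sketch the irq_poll callback: recover the queue via container_of(), harvest up to budget completions under hrrq->lock onto a local done-list, then run the callbacks with the lock dropped. A reconstruction consistent with those fragments (the done-list drain and the fast_done callback name are assumptions):

    static int ipr_iopoll(struct irq_poll *iop, int budget)
    {
    	struct ipr_hrr_queue *hrrq;
    	struct ipr_cmnd *ipr_cmd, *temp;
    	unsigned long hrrq_flags;
    	int completed_ops;
    	LIST_HEAD(doneq);

    	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);

    	spin_lock_irqsave(hrrq->lock, hrrq_flags);
    	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
    	if (completed_ops < budget)
    		irq_poll_complete(iop);	/* ran dry within budget: leave polled mode */
    	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

    	/* run completions without holding the queue lock */
    	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
    		list_del(&ipr_cmd->queue);
    		del_timer(&ipr_cmd->timer);
    		ipr_cmd->fast_done(ipr_cmd);	/* assumed callback */
    	}

    	return completed_ops;
    }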
5817 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5818 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5827 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5829 if (!hrrq->allow_interrupts) {
5830 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5835 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5866 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5885 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5886 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5892 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5895 if (!hrrq->allow_interrupts) {
5896 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5901 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5902 hrrq->toggle_bit) {
5903 irq_poll_sched(&hrrq->iopoll);
5904 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5908 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5909 hrrq->toggle_bit)
5911 if (ipr_process_hrrq(hrrq, -1, &doneq))
5915 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
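Lines 5901-5911 test the toggle bit twice: once to decide whether to hand the work to irq_poll, and again on the non-polled path. The host RRQ is a circular array of __be32 response words; a slot holds a fresh entry only while its IPR_HRRQ_TOGGLE_BIT equals the host's current toggle_bit, which flips on every wrap. A sketch of the consumer-side advance implied by the hrrq_start/hrrq_end/hrrq_curr/toggle_bit fields (see 8358-8361):

    /* consume one response word, wrapping and flipping the toggle bit */
    if (hrrq->hrrq_curr < hrrq->hrrq_end)
    	hrrq->hrrq_curr++;
    else {
    	hrrq->hrrq_curr = hrrq->hrrq_start;
    	hrrq->toggle_bit ^= 1u;
    }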
6072 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6087 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6090 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6092 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6174 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6177 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6179 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6509 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6534 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6538 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6539 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6542 spin_lock(&ipr_cmd->hrrq->_lock);
6544 spin_unlock(&ipr_cmd->hrrq->_lock);
6570 struct ipr_hrr_queue *hrrq;
6586 hrrq = &ioa_cfg->hrrq[hrrq_id];
6588 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6594 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6595 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6603 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6604 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6608 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6610 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6613 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6658 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6659 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6660 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6661 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6667 if (unlikely(hrrq->ioa_is_dead)) {
6668 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6669 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6679 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6682 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6686 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6690 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
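Lines 6570-6690 trace the queuecommand fast path: pick a queue, and under hrrq->lock check the gate flags, pop a free command block, build it with the lock dropped, then re-take the lock to move it to hrrq_pending_q and issue it. A condensed sketch of that control flow (IOARCB setup elided; res is the looked-up target resource; labels and the send helper are assumptions):

    hrrq = &ioa_cfg->hrrq[hrrq_id];

    spin_lock_irqsave(hrrq->lock, hrrq_flags);
    if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
    	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
    	return SCSI_MLQUEUE_HOST_BUSY;	/* adapter resetting: have the midlayer retry */
    }
    if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
    	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
    	goto err_nodev;			/* assumed label */
    }
    ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
    spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

    /* ... build the IOARCB and map DMA outside the lock ... */

    spin_lock_irqsave(hrrq->lock, hrrq_flags);
    list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
    ipr_send_command(ipr_cmd);		/* assumed helper name */
    spin_unlock_irqrestore(hrrq->lock, hrrq_flags);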
6792 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6823 struct ipr_hrr_queue *hrrq;
6833 for_each_hrrq(hrrq, ioa_cfg) {
6834 spin_lock(&hrrq->_lock);
6835 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6841 spin_unlock(&hrrq->_lock);
6890 spin_lock(&ipr_cmd->hrrq->_lock);
6906 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6907 spin_unlock(&ipr_cmd->hrrq->_lock);
7015 struct ipr_hrr_queue *hrrq;
7019 hrrq = &ioa_cfg->hrrq[hrrq_id];
7022 spin_lock(&hrrq->_lock);
7023 if (unlikely(hrrq->ioa_is_dead)) {
7024 spin_unlock(&hrrq->_lock);
7028 if (unlikely(!hrrq->allow_cmds)) {
7029 spin_unlock(&hrrq->_lock);
7033 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7035 spin_unlock(&hrrq->_lock);
7040 spin_unlock(&hrrq->_lock);
7069 spin_lock(&ipr_cmd->hrrq->_lock);
7070 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7071 ipr_cmd->hrrq->ioa_is_dead)) {
7072 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7073 spin_unlock(&ipr_cmd->hrrq->_lock);
7089 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7129 spin_unlock(&ipr_cmd->hrrq->_lock);
7134 spin_unlock(&ipr_cmd->hrrq->_lock);
7243 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7252 spin_lock(&ioa_cfg->hrrq[i]._lock);
7253 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7254 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7258 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7285 spin_lock(&ioa_cfg->hrrq[j]._lock);
7286 ioa_cfg->hrrq[j].allow_cmds = 1;
7287 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7317 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7652 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8165 &ioa_cfg->hrrq->hrrq_free_q);
8218 struct ipr_hrr_queue *hrrq;
8226 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8241 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8243 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8245 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8247 ((u64) hrrq->host_rrq_dma) & 0xff;
8249 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8251 (sizeof(u32) * hrrq->size) & 0xff;
8259 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8261 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8263 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8265 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
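Lines 8218-8265 are the Identify Host RRQ setup: the queue's bus address and byte length are packed big-endian into the CDB one byte per shift, with the upper 32 address bits added only for 64-bit (SIS64) adapters. A sketch of the packing; the CDB byte offsets and the sis64 guard are assumptions, only the shift expressions appear in the listing:

    /* low 32 bits of the ring's DMA address, MSB first */
    cdb[2] = ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
    cdb[3] = ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
    cdb[4] = ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
    cdb[5] = ((u64) hrrq->host_rrq_dma) & 0xff;

    /* ring length in bytes: hrrq->size response words of sizeof(u32) each */
    cdb[7] = ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
    cdb[8] = (sizeof(u32) * hrrq->size) & 0xff;

    if (sis64) {	/* assumed guard: upper address bits for 64-bit adapters */
    	cdb[10] = ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
    	cdb[11] = ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
    	cdb[12] = ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
    	cdb[13] = ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
    }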
8334 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8351 struct ipr_hrr_queue *hrrq;
8353 for_each_hrrq(hrrq, ioa_cfg) {
8354 spin_lock(&hrrq->_lock);
8355 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8358 hrrq->hrrq_start = hrrq->host_rrq;
8359 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8360 hrrq->hrrq_curr = hrrq->hrrq_start;
8361 hrrq->toggle_bit = 1;
8362 spin_unlock(&hrrq->_lock);
8427 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8454 spin_lock(&ioa_cfg->hrrq[i]._lock);
8455 ioa_cfg->hrrq[i].allow_interrupts = 1;
8456 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8496 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9017 struct ipr_hrr_queue *hrrq;
9024 for_each_hrrq(hrrq, ioa_cfg) {
9025 spin_lock(&hrrq->_lock);
9026 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9029 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9033 spin_unlock(&hrrq->_lock);
9058 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9063 if (!hrrq->ioa_is_dead) {
9065 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9184 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9233 &ipr_cmd->hrrq->hrrq_free_q);
9272 spin_lock(&ioa_cfg->hrrq[i]._lock);
9273 ioa_cfg->hrrq[i].allow_cmds = 0;
9274 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9277 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9308 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9324 spin_lock(&ioa_cfg->hrrq[i]._lock);
9325 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9326 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9336 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9366 spin_lock(&ioa_cfg->hrrq[i]._lock);
9367 ioa_cfg->hrrq[i].allow_interrupts = 0;
9368 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9371 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9460 spin_lock(&ioa_cfg->hrrq[i]._lock);
9461 ioa_cfg->hrrq[i].allow_cmds = 0;
9462 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9578 sizeof(u32) * ioa_cfg->hrrq[i].size,
9579 ioa_cfg->hrrq[i].host_rrq,
9580 ioa_cfg->hrrq[i].host_rrq_dma);
9612 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9674 ioa_cfg->hrrq[i].min_cmd_id = 0;
9675 ioa_cfg->hrrq[i].max_cmd_id =
9681 ioa_cfg->hrrq[i].min_cmd_id =
9684 ioa_cfg->hrrq[i].max_cmd_id =
9690 ioa_cfg->hrrq[i].min_cmd_id = 0;
9691 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9693 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9699 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9701 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9702 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
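Lines 9674-9702 carve the global command-block pool into per-queue ID windows: each queue owns a contiguous [min_cmd_id, max_cmd_id] range, and the remainder of the integer division is folded into the last queue (9701-9702). A simplified sketch; the listing's three branches suggest queue 0 and the single-queue case are special-cased (e.g. blocks reserved for internal commands), and that adjustment plus the variable names here are assumptions:

    entries_each_hrrq = num_cmd_blks / ioa_cfg->hrrq_num;	/* assumed names */

    for (i = 0; i < ioa_cfg->hrrq_num; i++) {
    	ioa_cfg->hrrq[i].min_cmd_id = i * entries_each_hrrq;
    	ioa_cfg->hrrq[i].max_cmd_id = (i + 1) * entries_each_hrrq - 1;
    	ioa_cfg->hrrq[i].size = entries_each_hrrq;
    }

    /* fold the division remainder into the last queue */
    i = num_cmd_blks - entries_each_hrrq * ioa_cfg->hrrq_num;
    ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
    ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;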
9744 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9745 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9746 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9790 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9791 sizeof(u32) * ioa_cfg->hrrq[i].size,
9792 &ioa_cfg->hrrq[i].host_rrq_dma,
9795 if (!ioa_cfg->hrrq[i].host_rrq) {
9798 sizeof(u32) * ioa_cfg->hrrq[i].size,
9799 ioa_cfg->hrrq[i].host_rrq,
9800 ioa_cfg->hrrq[i].host_rrq_dma);
9803 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
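Lines 9790-9803 allocate each queue's response ring with dma_alloc_coherent() and, on failure, free the rings already allocated before bailing out (the matching teardown is at 9578-9580 and 9852-9854). Reconstructed, with the unwind loop (i must be a signed int) and the error label assumed:

    for (i = 0; i < ioa_cfg->hrrq_num; i++) {
    	ioa_cfg->hrrq[i].host_rrq =
    		dma_alloc_coherent(&pdev->dev,
    				   sizeof(u32) * ioa_cfg->hrrq[i].size,
    				   &ioa_cfg->hrrq[i].host_rrq_dma,
    				   GFP_KERNEL);
    	if (!ioa_cfg->hrrq[i].host_rrq) {
    		while (--i >= 0)	/* unwind the rings already allocated */
    			dma_free_coherent(&pdev->dev,
    					  sizeof(u32) * ioa_cfg->hrrq[i].size,
    					  ioa_cfg->hrrq[i].host_rrq,
    					  ioa_cfg->hrrq[i].host_rrq_dma);
    		goto out_free_cmd_blks;	/* assumed label */
    	}
    	ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
    }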
9852 sizeof(u32) * ioa_cfg->hrrq[i].size,
9853 ioa_cfg->hrrq[i].host_rrq,
9854 ioa_cfg->hrrq[i].host_rrq_dma);
9995 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9996 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9997 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9998 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
10000 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
10002 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
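Lines 9995-10002 explain the two lock fields seen throughout the listing: every queue has a private _lock, but hrrq->lock is a pointer that aliases either the Scsi_Host's host_lock (preserving the old single-lock model) or the queue's own _lock (per-queue locking). A sketch; the exact guard condition is an assumption:

    for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
    	INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
    	INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
    	spin_lock_init(&ioa_cfg->hrrq[i]._lock);
    	if (i == 0)	/* assumed: the init queue keeps the host lock */
    		ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
    	else
    		ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
    }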
10067 &ioa_cfg->hrrq[i]);
10071 &ioa_cfg->hrrq[i]);
10387 &ioa_cfg->hrrq[0]);
10393 IPR_NAME, &ioa_cfg->hrrq[0]);
10494 spin_lock(&ioa_cfg->hrrq[i]._lock);
10495 ioa_cfg->hrrq[i].removing_ioa = 1;
10496 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10627 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10658 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10817 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10842 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||