Lines matching defs:hrrq — uses of the host request/response queue (struct ipr_hrr_queue) in the ipr SCSI driver (drivers/scsi/ipr.c). The leading number on each entry is the source line in that file.

682  * @hrrq:	hrr queue
688 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
692 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
693 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
713 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
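The cluster at 688-713 is the allocator fast path: a free command block is simply whatever sits at the head of hrrq_free_q. A minimal userspace model of that intrusive free-list pop follows; all names are illustrative, and the kernel version uses the real list_entry()/list_del() helpers rather than this hand-rolled unlink.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct cmnd {
        int              tag;
        struct list_head queue;   /* links the block into free/pending lists */
};

/* container_of-style recovery of the command from its embedded node */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct cmnd *get_free_cmnd(struct list_head *free_q)
{
        struct cmnd *cmd = NULL;

        if (free_q->next != free_q) {             /* !list_empty() */
                cmd = list_entry(free_q->next, struct cmnd, queue);
                cmd->queue.prev->next = cmd->queue.next;   /* list_del() */
                cmd->queue.next->prev = cmd->queue.prev;
        }
        return cmd;
}

int main(void)
{
        struct list_head free_q = { &free_q, &free_q };
        struct cmnd c = { .tag = 42 };
        struct cmnd *got;

        c.queue.prev = free_q.prev;               /* list_add_tail() */
        c.queue.next = &free_q;
        free_q.prev->next = &c.queue;
        free_q.prev = &c.queue;

        got = get_free_cmnd(&free_q);
        printf("popped tag %d\n", got ? got->tag : -1);
        return 0;
}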
736 spin_lock(&ioa_cfg->hrrq[i]._lock);
737 ioa_cfg->hrrq[i].allow_interrupts = 0;
738 spin_unlock(&ioa_cfg->hrrq[i]._lock);
821 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
837 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
839 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
841 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
856 struct ipr_hrr_queue *hrrq;
859 for_each_hrrq(hrrq, ioa_cfg) {
860 spin_lock(&hrrq->_lock);
862 temp, &hrrq->hrrq_pending_q, queue) {
878 spin_unlock(&hrrq->_lock);
929 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
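Lines 856-878 are the fail-everything walk used while resetting the adapter: each queue is locked in turn and its pending list drained with the _safe list iterator, because the loop body unlinks entries as it goes. A kernel-style sketch of the shape; the completion step in the body is illustrative, the real loop invokes the command's done callback:

struct ipr_cmnd *ipr_cmd, *temp;
struct ipr_hrr_queue *hrrq;

for_each_hrrq(hrrq, ioa_cfg) {
        spin_lock(&hrrq->_lock);
        list_for_each_entry_safe(ipr_cmd, temp,
                                 &hrrq->hrrq_pending_q, queue) {
                list_del(&ipr_cmd->queue);      /* off the pending list */
                /* ... mark the op failed and run its done callback,
                 *     which returns the block to hrrq_free_q ... */
        }
        spin_unlock(&hrrq->_lock);
}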
1031 unsigned int hrrq;
1034 hrrq = 0;
1036 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1037 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1039 return hrrq;
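Lines 1031-1039 pick a response queue for a new command: queue 0 (IPR_INIT_HRRQ) is reserved for internal and initialization work, so ordinary commands round-robin across queues 1..hrrq_num-1 from an atomic counter. A self-contained model of that arithmetic:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint hrrq_index;

static unsigned int get_hrrq_index(unsigned int hrrq_num)
{
        unsigned int hrrq;

        if (hrrq_num == 1)
                return 0;               /* single queue: everything on 0 */

        /* atomic_fetch_add() returns the old value; +1 mimics the
         * kernel's atomic_add_return() used at line 1036 */
        hrrq = atomic_fetch_add(&hrrq_index, 1) + 1;
        return (hrrq % (hrrq_num - 1)) + 1;     /* cycles 1..hrrq_num-1 */
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("%u ", get_hrrq_index(4));       /* 2 3 1 2 3 1 2 3 */
        printf("\n");
        return 0;
}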
1061 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1063 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1415 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2560 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
3251 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3566 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3599 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3602 spin_lock(&ioa_cfg->hrrq[i]._lock);
3603 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3604 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3731 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3738 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
4311 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4885 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
4908 struct ipr_hrr_queue *hrrq;
4916 for_each_hrrq(hrrq, ioa_cfg) {
4917 spin_lock_irqsave(hrrq->lock, flags);
4918 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4927 spin_unlock_irqrestore(hrrq->lock, flags);
4936 for_each_hrrq(hrrq, ioa_cfg) {
4937 spin_lock_irqsave(hrrq->lock, flags);
4938 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4947 spin_unlock_irqrestore(hrrq->lock, flags);
4972 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4987 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5032 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5066 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5134 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5196 struct ipr_hrr_queue *hrrq;
5207 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5222 for_each_hrrq(hrrq, ioa_cfg) {
5223 spin_lock(&hrrq->_lock);
5224 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5232 spin_unlock(&hrrq->_lock);
5259 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5282 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5464 struct ipr_hrr_queue *hrrq;
5470 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5472 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5473 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5477 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
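Lines 5464-5477, together with the irq_poll_init()/irq_poll_disable() calls at 3731/3738, form the polled-completion side: under load the interrupt handler hands the queue to an irq_poll instance that drains at most budget completions per invocation. A kernel-style sketch of that contract, condensed from the listing rather than quoted verbatim; completions are parked on a local doneq so their callbacks run with the queue lock dropped:

static int ipr_iopoll_sketch(struct irq_poll *iop, int budget)
{
        struct ipr_hrr_queue *hrrq =
                container_of(iop, struct ipr_hrr_queue, iopoll);
        struct ipr_cmnd *ipr_cmd, *temp;
        unsigned long flags;
        int completed_ops;
        LIST_HEAD(doneq);

        spin_lock_irqsave(hrrq->lock, flags);
        completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
        if (completed_ops < budget)
                irq_poll_complete(iop); /* drained: back to interrupt mode */
        spin_unlock_irqrestore(hrrq->lock, flags);

        /* run completion callbacks without holding the queue lock */
        list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
                list_del(&ipr_cmd->queue);
                ipr_cmd->fast_done(ipr_cmd);    /* field name per the driver */
        }
        return completed_ops;
}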
5498 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5499 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5508 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5510 if (!hrrq->allow_interrupts) {
5511 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5516 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5547 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5566 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5567 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5573 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5576 if (!hrrq->allow_interrupts) {
5577 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5582 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5583 hrrq->toggle_bit) {
5584 irq_poll_sched(&hrrq->iopoll);
5585 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5589 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5590 hrrq->toggle_bit)
5592 if (ipr_process_hrrq(hrrq, -1, &doneq))
5596 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
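Lines 5582-5590 show how the handler decides whether the adapter has posted new work: each 32-bit response slot carries a toggle bit that the adapter inverts on every wrap of the ring, so a slot is fresh exactly while its toggle bit matches the driver's expected value (reset to 1 alongside the ring at 7585-7588). A userspace model of the protocol; the bit position and all names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TOGGLE_BIT 0x1u
#define RING_SIZE  4

struct ring {
        uint32_t  buf[RING_SIZE];
        uint32_t *start, *end, *curr;
        uint32_t  toggle;               /* value marking a fresh entry */
};

static void ring_init(struct ring *r)
{
        for (int i = 0; i < RING_SIZE; i++)
                r->buf[i] = 0;
        r->start = r->curr = r->buf;
        r->end = &r->buf[RING_SIZE - 1];
        r->toggle = 1;                  /* hrrq->toggle_bit = 1 at 7588 */
}

/* Drain every fresh entry; flip the expected value on wrap, exactly as
 * the producer does, so stale first-pass entries are never re-read. */
static void ring_drain(struct ring *r)
{
        while ((*r->curr & TOGGLE_BIT) == r->toggle) {
                printf("consumed 0x%08x\n", (unsigned)*r->curr);
                if (r->curr == r->end) {
                        r->curr = r->start;
                        r->toggle ^= 1u;
                } else {
                        r->curr++;
                }
        }
}

int main(void)
{
        struct ring r;

        ring_init(&r);
        r.buf[0] = 0x100 | 1;           /* adapter posts two responses */
        r.buf[1] = 0x200 | 1;
        ring_drain(&r);                 /* consumes both, stops at buf[2] */
        return 0;
}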
5753 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5768 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5771 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5773 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5855 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5858 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5860 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6190 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6215 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6219 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6220 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6223 spin_lock(&ipr_cmd->hrrq->_lock);
6225 spin_unlock(&ipr_cmd->hrrq->_lock);
6251 struct ipr_hrr_queue *hrrq;
6260 hrrq = &ioa_cfg->hrrq[hrrq_id];
6262 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6268 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6269 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6277 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6278 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6282 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6284 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6287 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6332 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6333 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6334 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6335 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6341 if (unlikely(hrrq->ioa_is_dead)) {
6342 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6343 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6353 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6356 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6360 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6364 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
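Lines 6251-6364 are the queuecommand hot path: take the queue lock, check the per-queue state flags, pop a free command block, then drop the lock while building the request and re-check the flags before committing it to hrrq_pending_q, because the adapter can die in the window where the lock is not held. A userspace model of that lock/check/drop/recheck shape; names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
        bool allow_cmds, ioa_is_dead;
        int  pending;                   /* stand-in for hrrq_pending_q */
};

static int queue_cmd(struct queue *q)
{
        pthread_mutex_lock(&q->lock);
        if (!q->allow_cmds || q->ioa_is_dead) {
                pthread_mutex_unlock(&q->lock);
                return -1;              /* busy or dead: retry or fail */
        }
        pthread_mutex_unlock(&q->lock);

        /* ... build the request with the lock dropped ... */

        pthread_mutex_lock(&q->lock);
        if (q->ioa_is_dead) {           /* state may have changed meanwhile */
                pthread_mutex_unlock(&q->lock);
                return -1;
        }
        q->pending++;                   /* list_add_tail to the pending q */
        pthread_mutex_unlock(&q->lock);
        return 0;
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, true, false, 0 };

        printf("queued: %s\n", queue_cmd(&q) == 0 ? "ok" : "rejected");
        return 0;
}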
6470 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6479 spin_lock(&ioa_cfg->hrrq[i]._lock);
6480 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6481 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6485 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6512 spin_lock(&ioa_cfg->hrrq[j]._lock);
6513 ioa_cfg->hrrq[j].allow_cmds = 1;
6514 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6544 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6879 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7392 &ioa_cfg->hrrq->hrrq_free_q);
7445 struct ipr_hrr_queue *hrrq;
7453 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7468 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7470 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7472 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7474 ((u64) hrrq->host_rrq_dma) & 0xff;
7476 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7478 (sizeof(u32) * hrrq->size) & 0xff;
7486 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7488 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7490 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7492 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
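Lines 7468-7492 marshal the ring's 64-bit DMA address into the IDENTIFY HRRQ CDB one byte at a time, most significant byte first, with the ring length in bytes packed the same way at 7476-7478. A quick self-contained check of the shift arithmetic; the buffer layout is illustrative, not the real CDB offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma = 0x1122334455667788ULL;
        uint8_t  cdb[8];

        for (int i = 0; i < 8; i++)     /* same shifts as the listing */
                cdb[i] = (dma >> (56 - 8 * i)) & 0xff;

        for (int i = 0; i < 8; i++)
                printf("%02x ", cdb[i]);        /* 11 22 33 44 55 66 77 88 */
        printf("\n");
        return 0;
}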
7561 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7578 struct ipr_hrr_queue *hrrq;
7580 for_each_hrrq(hrrq, ioa_cfg) {
7581 spin_lock(&hrrq->_lock);
7582 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7585 hrrq->hrrq_start = hrrq->host_rrq;
7586 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7587 hrrq->hrrq_curr = hrrq->hrrq_start;
7588 hrrq->toggle_bit = 1;
7589 spin_unlock(&hrrq->_lock);
7654 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7681 spin_lock(&ioa_cfg->hrrq[i]._lock);
7682 ioa_cfg->hrrq[i].allow_interrupts = 1;
7683 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7723 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8244 struct ipr_hrr_queue *hrrq;
8251 for_each_hrrq(hrrq, ioa_cfg) {
8252 spin_lock(&hrrq->_lock);
8253 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8256 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8260 spin_unlock(&hrrq->_lock);
8285 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8290 if (!hrrq->ioa_is_dead) {
8292 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8411 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8460 &ipr_cmd->hrrq->hrrq_free_q);
8499 spin_lock(&ioa_cfg->hrrq[i]._lock);
8500 ioa_cfg->hrrq[i].allow_cmds = 0;
8501 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8504 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8535 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8551 spin_lock(&ioa_cfg->hrrq[i]._lock);
8552 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8553 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8563 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8593 spin_lock(&ioa_cfg->hrrq[i]._lock);
8594 ioa_cfg->hrrq[i].allow_interrupts = 0;
8595 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8598 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8687 spin_lock(&ioa_cfg->hrrq[i]._lock);
8688 ioa_cfg->hrrq[i].allow_cmds = 0;
8689 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8802 sizeof(u32) * ioa_cfg->hrrq[i].size,
8803 ioa_cfg->hrrq[i].host_rrq,
8804 ioa_cfg->hrrq[i].host_rrq_dma);
8836 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
8898 ioa_cfg->hrrq[i].min_cmd_id = 0;
8899 ioa_cfg->hrrq[i].max_cmd_id =
8905 ioa_cfg->hrrq[i].min_cmd_id =
8908 ioa_cfg->hrrq[i].max_cmd_id =
8914 ioa_cfg->hrrq[i].min_cmd_id = 0;
8915 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8917 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8923 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8925 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8926 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
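Lines 8898-8926 carve the global command-block pool into contiguous [min_cmd_id, max_cmd_id] ranges, one per queue, folding the division remainder into the last queue (8923-8926). A self-contained sketch of the arithmetic; the reserved count for queue 0 is an illustrative stand-in, not the driver's constant:

#include <stdio.h>

#define NUM_CMDS 100
#define NUM_HRRQ 4
#define RESERVED 8      /* illustrative: queue 0 keeps internal cmds */

int main(void)
{
        int per_q = (NUM_CMDS - RESERVED) / (NUM_HRRQ - 1);
        int min[NUM_HRRQ], max[NUM_HRRQ], size[NUM_HRRQ];
        int i, rem;

        min[0] = 0;
        max[0] = RESERVED - 1;
        size[0] = RESERVED;
        for (i = 1; i < NUM_HRRQ; i++) {
                min[i] = max[i - 1] + 1;
                max[i] = min[i] + per_q - 1;
                size[i] = per_q;
        }
        /* fold the remainder into the last queue's size and max id */
        rem = (NUM_CMDS - 1) - max[NUM_HRRQ - 1];
        size[NUM_HRRQ - 1] += rem;
        max[NUM_HRRQ - 1] += rem;

        for (i = 0; i < NUM_HRRQ; i++)
                printf("hrrq[%d]: ids %d..%d (size %d)\n",
                       i, min[i], max[i], size[i]);
        return 0;
}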
8968 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8969 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8970 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9014 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9015 sizeof(u32) * ioa_cfg->hrrq[i].size,
9016 &ioa_cfg->hrrq[i].host_rrq_dma,
9019 if (!ioa_cfg->hrrq[i].host_rrq) {
9022 sizeof(u32) * ioa_cfg->hrrq[i].size,
9023 ioa_cfg->hrrq[i].host_rrq,
9024 ioa_cfg->hrrq[i].host_rrq_dma);
9027 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9076 sizeof(u32) * ioa_cfg->hrrq[i].size,
9077 ioa_cfg->hrrq[i].host_rrq,
9078 ioa_cfg->hrrq[i].host_rrq_dma);
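Lines 9014-9027 allocate one coherent DMA buffer per ring, unwinding every already-allocated ring on failure; 8802-8804 and 9076-9078 free them with the same size/virt/dma triple. A kernel-style sketch of the allocation loop, condensed from the listing; the goto label is illustrative:

for (i = 0; i < ioa_cfg->hrrq_num; i++) {
        ioa_cfg->hrrq[i].host_rrq =
                dma_alloc_coherent(&pdev->dev,
                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
                                   &ioa_cfg->hrrq[i].host_rrq_dma,
                                   GFP_KERNEL);
        if (!ioa_cfg->hrrq[i].host_rrq) {
                while (--i >= 0)        /* unwind the earlier rings */
                        dma_free_coherent(&pdev->dev,
                                          sizeof(u32) * ioa_cfg->hrrq[i].size,
                                          ioa_cfg->hrrq[i].host_rrq,
                                          ioa_cfg->hrrq[i].host_rrq_dma);
                goto out_free_cmd_blocks;       /* label illustrative */
        }
        ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
}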
9219 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9220 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9221 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9222 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9224 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9226 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
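Lines 9219-9226 explain the _lock/lock pair seen throughout the listing: every queue initializes a private _lock, and hrrq->lock is a pointer aliased either to the shared Scsi_Host lock or to that private lock; the selecting if/else sits at the unlisted lines 9223/9225, so which queue gets which is not shown here. The fast paths (4917, 5472, 5508, 6262, ...) always take *hrrq->lock and stay agnostic. A userspace model of the aliasing; the selection condition is illustrative:

#include <pthread.h>

struct hrr_queue {
        pthread_mutex_t  _lock;         /* private per-queue lock */
        pthread_mutex_t *lock;          /* what the fast paths take */
};

static void hrrq_init(struct hrr_queue *q, pthread_mutex_t *shared,
                      int use_shared)
{
        pthread_mutex_init(&q->_lock, NULL);
        q->lock = use_shared ? shared : &q->_lock;  /* condition illustrative */
}

int main(void)
{
        pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
        struct hrr_queue q0, q1;

        hrrq_init(&q0, &host_lock, 1);  /* aliases the shared lock */
        hrrq_init(&q1, &host_lock, 0);  /* uses its own _lock */

        pthread_mutex_lock(q1.lock);    /* fast path: lock via the pointer */
        pthread_mutex_unlock(q1.lock);
        return 0;
}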
9291 &ioa_cfg->hrrq[i]);
9295 &ioa_cfg->hrrq[i]);
9609 &ioa_cfg->hrrq[0]);
9615 IPR_NAME, &ioa_cfg->hrrq[0]);
9716 spin_lock(&ioa_cfg->hrrq[i]._lock);
9717 ioa_cfg->hrrq[i].removing_ioa = 1;
9718 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9844 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
9875 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10034 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10059 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||