Lines matching refs: snic (cross-reference hits from the Linux snic SCSI driver; each entry gives the source line number followed by the matching line)

29 #include "snic.h"
39 struct snic *snic = svnic_dev_priv(wq->vdev);
44 SNIC_HOST_INFO(snic->shost,
48 SNIC_TRC(snic->shost->host_no, 0, 0,
63 struct snic *snic = svnic_dev_priv(vdev);
68 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
69 svnic_wq_service(&snic->wq[q_num],
74 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
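
The three locked lines above belong to the per-CQ-entry callback (snic_wq_cmpl_handler_cont in the source): it recovers the snic instance from the vnic device and services the matching work queue under that queue's lock. A minimal sketch of that shape; the callback parameter list and the frame-send completion hook passed to svnic_wq_service() are assumptions based on the surrounding matches:

    static int
    snic_wq_cmpl_handler_cont(struct vnic_dev *vdev, struct cq_desc *cq_desc,
                              u8 type, u16 q_num, u16 cmpl_idx, void *opaque)
    {
        struct snic *snic = svnic_dev_priv(vdev);
        unsigned long flags;

        /* Serialize against submitters touching the same work queue. */
        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        svnic_wq_service(&snic->wq[q_num], cq_desc, cmpl_idx,
                         snic_wq_cmpl_frame_send, /* per-buffer completion */
                         NULL);
        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

        return 0;
    }
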
80 snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
85 snic->s_stats.misc.last_ack_time = jiffies;
86 for (i = 0; i < snic->wq_count; i++) {
87 work_done += svnic_cq_service(&snic->cq[i],
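
snic_wq_cmpl_handler() itself (lines 80-87 of the source) stamps the last-ack time and then walks every work queue's completion queue, accumulating the work done. A sketch of that loop; the svnic_cq_service() argument order follows the usual vnic pattern and is an assumption:

    int
    snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
    {
        unsigned int work_done = 0;
        unsigned int i;

        /* Record when the firmware last acknowledged work. */
        snic->s_stats.misc.last_ack_time = jiffies;

        /* Service each WQ's completion queue up to the budget. */
        for (i = 0; i < snic->wq_count; i++)
            work_done += svnic_cq_service(&snic->cq[i], work_to_do,
                                          snic_wq_cmpl_handler_cont,
                                          NULL);

        return work_done;
    }
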
101 struct snic *snic = svnic_dev_priv(wq->vdev);
105 dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
109 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
111 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
117 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
120 snic_pci_unmap_rsp_buf(snic, rqi);
124 snic_req_free(snic, rqi);
125 SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
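
Lines 101-125 of the source are the teardown path for a transmitted buffer: unmap the request's DMA mapping, detach the request from spl_cmd_list under its lock, unmap any response buffer, and return the request to its pool. A sketch of that path; req_to_rqi() and the rqi->sge_va check are taken from the driver's conventions, not from the matches:

    static void
    snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
    {
        struct snic_host_req *req = buf->os_buf;
        struct snic *snic = svnic_dev_priv(wq->vdev);
        struct snic_req_info *rqi;
        unsigned long flags;

        dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
                         DMA_TO_DEVICE);

        rqi = req_to_rqi(req); /* assumed helper: host req -> req info */

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        if (list_empty(&rqi->list)) {
            /* Already released by another path; nothing to free. */
            spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
            return;
        }
        list_del_init(&rqi->list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

        if (rqi->sge_va)
            snic_pci_unmap_rsp_buf(snic, rqi);

        snic_req_free(snic, rqi);
        SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
    }
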
133 snic_select_wq(struct snic *snic)
142 snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
144 int nr_wqdesc = snic->config.wq_enet_desc_count;
151 SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
157 nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);
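
snic_wqdesc_avail() (lines 142-157) computes headroom by subtracting the firmware's active request count from the configured descriptor total; the multi-queue branch only logs, since per-WQ accounting is not implemented. A sketch; the single-queue snic_select_wq() stub, the q_num > 0 bailout, and the HBA-reset carve-out are assumptions consistent with the matches:

    static int
    snic_select_wq(struct snic *snic)
    {
        /* Single work queue today, so queue 0 is always chosen. */
        return 0;
    }

    static int
    snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
    {
        int nr_wqdesc = snic->config.wq_enet_desc_count;

        if (q_num > 0) {
            /* Per-WQ active-request accounting would be needed here. */
            SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
            return -1;
        }

        nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

        /* Keep one descriptor in reserve except for an HBA reset. */
        return (req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1;
    }
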
163 snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
167 struct snic_fw_stats *fwstats = &snic->s_stats.fw;
176 pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
177 if (dma_mapping_error(&snic->pdev->dev, pa)) {
178 SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
185 q_num = snic_select_wq(snic);
187 spin_lock_irqsave(&snic->wq_lock[q_num], flags);
188 desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
190 dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
192 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
193 atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
194 SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
199 snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
205 spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
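
The submission path (lines 163-205) maps the request for DMA, takes the selected queue's lock, and only posts the descriptor if headroom remains; otherwise it unwinds the mapping and records a wq_alloc_fail. A condensed sketch; the -ENOMEM returns and the active-request increment are assumptions:

    static int
    snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
    {
        struct snic_fw_stats *fwstats = &snic->s_stats.fw;
        struct snic_host_req *req = os_buf;
        unsigned long flags;
        dma_addr_t pa;
        long desc_avail;
        int q_num;

        pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&snic->pdev->dev, pa)) {
            SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
            return -ENOMEM;
        }

        q_num = snic_select_wq(snic);

        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
        if (desc_avail <= 0) {
            /* Queue full: undo the mapping and count the failure. */
            dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
            spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
            atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
            SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
            return -ENOMEM;
        }

        snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
        atomic64_inc(&fwstats->actv_reqs); /* assumed accounting */
        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

        return 0;
    }
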
214 * snic_handle_untagged_req: Adds snic-specific requests to spl_cmd_list.
218 snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
224 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
225 list_add_tail(&rqi->list, &snic->spl_cmd_list);
226 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
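
snic_handle_untagged_req() is the whole pattern in the three matched lines above: driver-internal (untagged) requests are parked on spl_cmd_list under spl_cmd_lock so they can be found and reclaimed later. A sketch; the list-head initialization before the add is an assumption:

    void
    snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
    {
        unsigned long flags;

        INIT_LIST_HEAD(&rqi->list); /* assumed, so list_empty() works later */

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        list_add_tail(&rqi->list, &snic->spl_cmd_list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
    }
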
234 snic_req_init(struct snic *snic, int sg_cnt)
242 rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
244 atomic64_inc(&snic->s_stats.io.alloc_fail);
245 SNIC_HOST_ERR(snic->shost,
246 "Failed to allocate memory from snic req pool id = %d\n",
256 rqi->snic = snic;
265 if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
266 atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
269 atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
277 SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);
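
snic_req_init() (lines 234-277) picks a mempool by SGL count, allocates with GFP_ATOMIC since this runs in I/O submission context, and updates the max-SGL and per-size histogram statistics. A condensed sketch; the SNIC_REQ_CACHE_DFLT_SGL/SNIC_REQ_CACHE_MAX_SGL pool selection and the field initialization are assumptions beyond the matched lines:

    struct snic_req_info *
    snic_req_init(struct snic *snic, int sg_cnt)
    {
        struct snic_req_info *rqi;
        u8 typ;

        /* Pool choice by SGL size; the threshold is an assumption. */
        typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
            SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

        rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
        if (!rqi) {
            atomic64_inc(&snic->s_stats.io.alloc_fail);
            SNIC_HOST_ERR(snic->shost,
                          "Failed to allocate memory from snic req pool id = %d\n",
                          typ);
            return NULL;
        }

        memset(rqi, 0, sizeof(*rqi));
        rqi->rq_pool_type = typ;
        rqi->start_time = jiffies;
        rqi->snic = snic;

        if (sg_cnt) {
            /* Track the largest SGL seen and the per-size histogram. */
            if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
                atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
            atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
        }

        SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

        return rqi;
    }
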
286 snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
297 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
299 SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
317 snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
323 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
325 SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
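
snic_abort_req_init() and snic_dr_req_init() (lines 286-325) share one allocation pattern: a fixed-size task-management request drawn from the SNIC_REQ_TM_CACHE pool with GFP_ATOMIC, failing soft with a host error. A sketch of that shared shape as a hypothetical helper (snic_tm_req_alloc is not a name from the source):

    static struct snic_host_req *
    snic_tm_req_alloc(struct snic *snic, const char *who)
    {
        struct snic_host_req *req;

        /* TM requests carry no SGL, so one cache size fits all. */
        req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
        if (!req) {
            SNIC_HOST_ERR(snic->shost, "%s:Failed to alloc tm req.\n", who);
            return NULL;
        }

        memset(req, 0, sizeof(*req));

        return req;
    }
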
342 snic_req_free(struct snic *snic, struct snic_req_info *rqi)
348 SNIC_SCSI_DBG(snic->shost,
354 dma_unmap_single(&snic->pdev->dev,
359 mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
364 dma_unmap_single(&snic->pdev->dev,
369 mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
373 dma_unmap_single(&snic->pdev->dev,
378 mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
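
snic_req_free() (lines 342-378) releases everything a request may own, in order: the abort and device-reset TM requests go back to the TM cache after their DMA mappings are undone, then the main request's mapping is undone and the rqi returns to whichever pool it came from. A condensed sketch; the req_pa field checks follow the driver's convention and are assumptions:

    void
    snic_req_free(struct snic *snic, struct snic_req_info *rqi)
    {
        if (rqi->abort_req) {
            if (rqi->abort_req->req_pa)
                dma_unmap_single(&snic->pdev->dev,
                                 rqi->abort_req->req_pa,
                                 sizeof(struct snic_host_req),
                                 DMA_TO_DEVICE);
            mempool_free(rqi->abort_req,
                         snic->req_pool[SNIC_REQ_TM_CACHE]);
        }

        if (rqi->dr_req) {
            if (rqi->dr_req->req_pa)
                dma_unmap_single(&snic->pdev->dev,
                                 rqi->dr_req->req_pa,
                                 sizeof(struct snic_host_req),
                                 DMA_TO_DEVICE);
            mempool_free(rqi->dr_req,
                         snic->req_pool[SNIC_REQ_TM_CACHE]);
        }

        if (rqi->req->req_pa)
            dma_unmap_single(&snic->pdev->dev,
                             rqi->req->req_pa,
                             rqi->req_len,
                             DMA_TO_DEVICE);

        /* The rqi itself came from one of the per-SGL-size pools. */
        mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
    }
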
382 snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
388 dma_unmap_single(&snic->pdev->dev,
398 snic_free_all_untagged_reqs(struct snic *snic)
404 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
405 list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
409 snic_pci_unmap_rsp_buf(snic, rqi);
414 snic_req_free(snic, rqi);
416 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
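
snic_free_all_untagged_reqs() (lines 398-416) drains spl_cmd_list with a safe iterator, releasing each parked request while the lock is held. A sketch; the sge_va response-buffer check is an assumption carried over from the single-request free path:

    void
    snic_free_all_untagged_reqs(struct snic *snic)
    {
        struct snic_req_info *rqi;
        struct list_head *cur, *nxt;
        unsigned long flags;

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
            rqi = list_entry(cur, struct snic_req_info, list);
            list_del_init(&rqi->list);

            if (rqi->sge_va)
                snic_pci_unmap_rsp_buf(snic, rqi);

            snic_req_free(snic, rqi);
        }
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
    }
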
423 snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
427 spin_lock_irqsave(&snic->snic_lock, flags);
428 if (snic->in_remove) {
429 spin_unlock_irqrestore(&snic->snic_lock, flags);
432 spin_unlock_irqrestore(&snic->snic_lock, flags);
434 spin_lock_irqsave(&snic->spl_cmd_lock, flags);
436 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
440 spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
441 snic_req_free(snic, rqi);
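
snic_release_untagged_req() (lines 423-441) first checks in_remove under snic_lock and backs off during teardown, when snic_free_all_untagged_reqs() owns cleanup; otherwise it unlinks the request under spl_cmd_lock and frees it. A sketch; the empty-list early return between the two unlocks at source lines 436 and 440 is inferred:

    void
    snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
    {
        unsigned long flags;

        spin_lock_irqsave(&snic->snic_lock, flags);
        if (snic->in_remove) {
            /* Driver teardown in progress; the drain path frees it. */
            spin_unlock_irqrestore(&snic->snic_lock, flags);
            return;
        }
        spin_unlock_irqrestore(&snic->snic_lock, flags);

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        if (list_empty(&rqi->list)) {
            spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
            return;
        }
        list_del_init(&rqi->list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

        snic_req_free(snic, rqi);
    }
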
561 snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
567 if (duration > atomic64_read(&snic->s_stats.io.max_time))
568 atomic64_set(&snic->s_stats.io.max_time, duration);
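
Finally, snic_calc_io_process_time() (lines 561-568) keeps a running maximum of I/O duration. Note the read-then-set pair is not atomic as a whole; it tolerates a lost update because the value is only a statistic. A sketch, assuming duration is measured in jiffies from rqi->start_time:

    static void
    snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
    {
        u64 duration = jiffies - rqi->start_time;

        /* Racy max update: acceptable for a debug/statistics counter. */
        if (duration > atomic64_read(&snic->s_stats.io.max_time))
            atomic64_set(&snic->s_stats.io.max_time, duration);
    }
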