Lines Matching refs:scrq

2849 struct ibmvnic_sub_crq_queue *scrq)
2853 if (!scrq) {
2854 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
2858 if (scrq->irq) {
2859 free_irq(scrq->irq, scrq);
2860 irq_dispose_mapping(scrq->irq);
2861 scrq->irq = 0;
2863 if (scrq->msgs) {
2864 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2865 atomic_set(&scrq->used, 0);
2866 scrq->cur = 0;
2868 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
2872 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2873 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
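
The hits at source lines 2849-2873 above belong to the per-queue reset path: drop the Linux-side IRQ, wipe the 4-page message ring, and re-register the still-mapped ring with the hypervisor. A minimal sketch of that flow, assuming the ibmvnic_adapter / ibmvnic_sub_crq_queue definitions and the h_reg_sub_crq() wrapper the driver provides elsewhere:

/* Sketch only: reconstructs the reset flow visible in the fragments above. */
static int reset_one_sub_crq_queue_sketch(struct ibmvnic_adapter *adapter,
					  struct ibmvnic_sub_crq_queue *scrq)
{
	int rc;

	if (!scrq) {
		netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
		return -EINVAL;
	}

	if (scrq->irq) {			/* drop the Linux-side IRQ first */
		free_irq(scrq->irq, scrq);
		irq_dispose_mapping(scrq->irq);
		scrq->irq = 0;
	}

	if (scrq->msgs) {			/* wipe the 4-page message ring */
		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
		atomic_set(&scrq->used, 0);
		scrq->cur = 0;
	} else {
		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
		return -EINVAL;
	}

	/* re-register the (still DMA-mapped) ring with the hypervisor */
	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	return rc;
}
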
2902 struct ibmvnic_sub_crq_queue *scrq,
2915 scrq->crq_num);
2921 scrq->crq_num, rc);
2925 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2927 free_pages((unsigned long)scrq->msgs, 2);
2928 kfree(scrq);
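
Lines 2902-2928 are the matching teardown: release the sub-CRQ on the hypervisor side (identified by crq_num), unmap the 4-page DMA buffer, free the order-2 page allocation behind scrq->msgs, then free the tracking structure. A hedged sketch; free_sub_crq_hcall() is a hypothetical stand-in for whatever deregistration call the driver actually issues, and the debug message text is illustrative only:

static void release_sub_crq_queue_sketch(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ %lx\n", scrq->crq_num);

	rc = free_sub_crq_hcall(adapter, scrq->crq_num);	/* hypothetical helper */
	if (rc)
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %lx, rc = %ld\n",
			   scrq->crq_num, rc);

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);	/* order-2: 4 pages */
	kfree(scrq);
}
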
2935 struct ibmvnic_sub_crq_queue *scrq;
2938 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2939 if (!scrq)
2942 scrq->msgs =
2944 if (!scrq->msgs) {
2949 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2951 if (dma_mapping_error(dev, scrq->msg_token)) {
2956 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2957 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2969 scrq->adapter = adapter;
2970 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2971 spin_lock_init(&scrq->lock);
2975 scrq->crq_num, scrq->hw_irq, scrq->irq);
2977 return scrq;
2980 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2983 free_pages((unsigned long)scrq->msgs, 2);
2985 kfree(scrq);
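
Lines 2935-2985 cover queue construction and its unwind path. The allocation call behind "scrq->msgs =" is not shown in the listing; the __get_free_pages() form and the error-label names below are assumptions, the rest follows the fragments:

static struct ibmvnic_sub_crq_queue *
init_sub_crq_queue_sketch(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	/* order-2 allocation: 4 pages of sub-CRQ message entries */
	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs)
		goto zero_page_failed;

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token))
		goto map_failed;

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	if (rc)
		goto reg_failed;

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);
	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);
	return NULL;
}
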
3042 struct ibmvnic_sub_crq_queue *scrq)
3048 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3050 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3051 scrq->hw_irq, rc);
3056 struct ibmvnic_sub_crq_queue *scrq)
3061 if (scrq->hw_irq > 0x100000000ULL) {
3062 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3068 u64 val = (0xff000000) | scrq->hw_irq;
3080 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3082 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3083 scrq->hw_irq, rc);
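
Lines 3042-3083 are the interrupt gating pair. The fragments show only the trailing hypercall arguments; completing the call as plpar_hcall_norets(H_VIOCTL, ...) below is an assumption about the usual pseries VIO interrupt-control form. The (0xff000000) | scrq->hw_irq value built at line 3068 looks like an EOI operand prepared before re-enabling, but the call it feeds is not in the listing, so it is omitted here:

static int disable_scrq_irq_sketch(struct ibmvnic_adapter *adapter,
				   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq_sketch(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	if (scrq->hw_irq > 0x100000000ULL) {	/* sanity-check the source number */
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
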
3088 struct ibmvnic_sub_crq_queue *scrq)
3098 while (pending_scrq(adapter, scrq)) {
3099 unsigned int pool = scrq->pool_index;
3109 next = ibmvnic_next_scrq(adapter, scrq);
3143 /* remove tx_comp scrq*/
3146 if (atomic_sub_return(num_entries, &scrq->used) <=
3149 scrq->pool_index)) {
3150 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3152 scrq->pool_index);
3156 enable_scrq_irq(adapter, scrq);
3158 if (pending_scrq(adapter, scrq)) {
3159 disable_scrq_irq(adapter, scrq);
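
Lines 3088-3159 show the TX completion drain: consume pending entries, wake the matching netdev TX subqueue once enough ring slots have been returned, then re-enable the interrupt and re-check for work that raced in while it was masked. In the sketch below, handle_one_tx_comp() is a hypothetical stand-in for the per-descriptor handling the listing elides, and the half-ring wake threshold (req_tx_entries_per_subcrq / 2) is an assumption not visible in the fragments:

static int complete_tx_sketch(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *next;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;
		int num_entries;

		next = ibmvnic_next_scrq(adapter, scrq);
		num_entries = handle_one_tx_comp(adapter, pool, next); /* hypothetical */

		/* wake the subqueue once enough descriptors are back */
		if (atomic_sub_return(num_entries, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev, scrq->pool_index))
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
	}

	enable_scrq_irq(adapter, scrq);

	/* close the race: work may have arrived between the last poll and
	 * the interrupt being re-enabled
	 */
	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
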
3168 struct ibmvnic_sub_crq_queue *scrq = instance;
3169 struct ibmvnic_adapter *adapter = scrq->adapter;
3171 disable_scrq_irq(adapter, scrq);
3172 ibmvnic_complete_tx(adapter, scrq);
3179 struct ibmvnic_sub_crq_queue *scrq = instance;
3180 struct ibmvnic_adapter *adapter = scrq->adapter;
3188 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3190 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3191 disable_scrq_irq(adapter, scrq);
3192 __napi_schedule(&adapter->napi[scrq->scrq_num]);
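
Lines 3168-3192 are the two per-queue interrupt handlers: the TX side masks its own interrupt and drains completions inline, while the RX side counts the interrupt and defers all work to NAPI, masking the interrupt only if the poll was actually scheduled. A sketch assembled from the fragments; the IRQ_HANDLED returns are assumed:

static irqreturn_t interrupt_tx_sketch(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t interrupt_rx_sketch(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
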
3201 struct ibmvnic_sub_crq_queue *scrq;
3208 scrq = adapter->tx_scrq[i];
3209 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3211 if (!scrq->irq) {
3217 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3219 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3220 0, scrq->name, scrq);
3224 scrq->irq, rc);
3225 irq_dispose_mapping(scrq->irq);
3233 scrq = adapter->rx_scrq[i];
3234 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3235 if (!scrq->irq) {
3240 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3242 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3243 0, scrq->name, scrq);
3246 scrq->irq, rc);
3247 irq_dispose_mapping(scrq->irq);
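
Lines 3201-3247 wire each sub-CRQ's hypervisor interrupt number to a Linux IRQ and install the handlers. The sketch below shows the TX leg only (the RX loop at lines 3233-3247 is symmetric); adapter->req_tx_queues as the loop bound and the unit address as the "%x" in the IRQ name are assumptions, since the fragments show only the format string:

static int init_tx_scrq_irqs_sketch(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_sub_crq_queue *scrq;
	int i, rc;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];

		/* translate the hypervisor interrupt number into a Linux IRQ */
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			goto fail;
		}

		snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
			 adapter->vdev->unit_address, i);
		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, scrq->name, scrq);
		if (rc) {
			irq_dispose_mapping(scrq->irq);
			scrq->irq = 0;
			goto fail;
		}
	}
	return 0;

fail:
	/* the real error path would also unwind IRQs already requested */
	return rc;
}
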
3501 struct ibmvnic_sub_crq_queue *scrq)
3503 union sub_crq *entry = &scrq->msgs[scrq->cur];
3512 struct ibmvnic_sub_crq_queue *scrq)
3517 spin_lock_irqsave(&scrq->lock, flags);
3518 entry = &scrq->msgs[scrq->cur];
3520 if (++scrq->cur == scrq->size)
3521 scrq->cur = 0;
3525 spin_unlock_irqrestore(&scrq->lock, flags);
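
Lines 3501-3525 are the consumer side of the message ring: pending_scrq() peeks at the entry under the cursor without consuming it, and ibmvnic_next_scrq() takes the queue lock, returns the current entry, and advances the cursor with wraparound at scrq->size. The validity test on entry->generic.first is an assumption; the fragments show only the indexing and cursor handling:

static int pending_scrq_sketch(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	return (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) ? 1 : 0;
}

static union sub_crq *next_scrq_sketch(struct ibmvnic_adapter *adapter,
				       struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)	/* wrap the cursor */
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
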
5506 ret += 4 * PAGE_SIZE; /* the scrq message queue */
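
The final hit, in the DMA/IO accounting path at line 5506, charges 4 * PAGE_SIZE per sub-CRQ message ring, matching the order-2 allocation at lines 2942-2949 and the scrq->size computation at line 2970. A small standalone example of the arithmetic; the 32-byte descriptor size is an assumption for illustration only (the driver's union sub_crq defines the real value):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* typical 4 KiB page */
	unsigned long ring_bytes = 4 * page_size;	/* the 4 * PAGE_SIZE reserved per scrq */
	unsigned long desc_size = 32;			/* assumed sizeof(union sub_crq) */

	printf("ring bytes = %lu, entries per sub-CRQ ring = %lu\n",
	       ring_bytes, ring_bytes / desc_size);
	return 0;
}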