Lines Matching defs:qdio (drivers/s390/scsi/zfcp_qdio.c, source line numbers preserved)
22 static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
25 struct zfcp_adapter *adapter = qdio->adapter;
50 static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
56 span = (now - qdio->req_q_time) >> 12;
57 used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
58 qdio->req_q_util += used * span;
59 qdio->req_q_time = now;
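Lines 50-59 are the request-queue utilization accounting: the free-slot count is subtracted from the queue depth to get the number of SBALs in use, and that number is weighted by the time elapsed since the previous sample. A minimal standalone model of the arithmetic, assuming a queue depth of 128 (QDIO_MAX_BUFFERS_PER_Q) and the usual reading that the 12-bit shift scales the s390 TOD clock down to roughly microseconds:

    #include <stdint.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128   /* assumed queue depth, as in QDIO */

    struct req_q_stats {
        uint64_t req_q_time;   /* TOD clock value at the last sample */
        uint64_t req_q_util;   /* accumulated (SBALs in use) * (time span) */
        int      req_q_free;   /* current number of free SBALs */
    };

    /* Sketch of zfcp_qdio_account(): weight the in-use SBAL count by the
     * time since the previous sample; >> 12 scales TOD units down. */
    static void account(struct req_q_stats *s, uint64_t now)
    {
        uint64_t span = (now - s->req_q_time) >> 12;
        int used = QDIO_MAX_BUFFERS_PER_Q - s->req_q_free;

        s->req_q_util += (uint64_t)used * span;
        s->req_q_time = now;
    }

In the driver this runs under stat_lock (lines 76-78 and 259-261); the sketch leaves locking out.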
66 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
69 zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
74 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
76 spin_lock_irq(&qdio->stat_lock);
77 zfcp_qdio_account(qdio);
78 spin_unlock_irq(&qdio->stat_lock);
79 atomic_add(count, &qdio->req_q_free);
80 wake_up(&qdio->req_q_wq);
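Lines 66-80 are the outbound (request queue) completion callback: the SBALs the hardware has finished with are zeroed, the free counter is raised by the completed count, and waiters blocked in zfcp_qdio_sbal_get() are woken. A standalone sketch of the buffer-return step, with the ring wrap-around made explicit and the locking, atomics and wait queue left out:

    #include <string.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128

    /* simplified stand-in for struct qdio_buffer (one SBAL) */
    struct sbal { unsigned char bytes[256]; };

    struct req_q_model {
        struct sbal bufs[QDIO_MAX_BUFFERS_PER_Q];
        int free;                     /* models the atomic req_q_free */
    };

    /* Clear 'count' completed SBALs starting at 'idx', wrapping at the end
     * of the ring, then hand them back to the free pool. */
    static void complete_sbals(struct req_q_model *q, int idx, int count)
    {
        for (int i = 0; i < count; i++) {
            memset(&q->bufs[idx], 0, sizeof(q->bufs[idx]));
            idx = (idx + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        q->free += count;   /* the driver uses atomic_add() and then wake_up() */
    }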
87 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
88 struct zfcp_adapter *adapter = qdio->adapter;
100 sbale = qdio->res_q[idx]->element;
109 pl[sbal_no] = qdio->res_q[sbal_idx];
113 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
124 zfcp_fsf_reqid_check(qdio, sbal_idx);
131 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
135 zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
140 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
148 sbale = zfcp_qdio_sbale_req(qdio, q_req);
163 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
170 zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
172 if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
173 return zfcp_qdio_sbal_chain(qdio, q_req);
175 return zfcp_qdio_sbale_curr(qdio, q_req);
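Lines 135-175 cover zfcp_qdio_sbal_chain() and zfcp_qdio_sbale_next(): while a request is being built, elements are taken from the current SBAL until it is full, at which point the request chains into the next SBAL of the ring, unless that would exceed the request's SBAL limit. A reduced model of that decision; the real code additionally sets last-entry/chaining flags and the storage-block type on the elements, which is omitted here:

    #define QDIO_MAX_BUFFERS_PER_Q 128

    struct q_req_model {
        int sbal_last;           /* SBAL currently being filled */
        int sbal_limit;          /* last SBAL this request may use */
        int sbale_curr;          /* element index inside sbal_last */
        int max_sbale_per_sbal;  /* per-adapter element limit (see lines 408/411) */
    };

    /* Either step to the next element of the current SBAL, or chain into
     * the next SBAL.  Returns 0 on success, -1 when the request would
     * exceed its SBAL limit. */
    static int sbale_next(struct q_req_model *r)
    {
        if (r->sbale_curr == r->max_sbale_per_sbal - 1) {
            if (r->sbal_last == r->sbal_limit)
                return -1;
            r->sbal_last = (r->sbal_last + 1) % QDIO_MAX_BUFFERS_PER_Q;
            r->sbale_curr = 0;   /* restart at the first element of the new SBAL */
            return 0;
        }
        r->sbale_curr++;
        return 0;
    }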
180 * @qdio: pointer to struct zfcp_qdio
185 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
191 sbale = zfcp_qdio_sbale_req(qdio, q_req);
195 sbale = zfcp_qdio_sbale_next(qdio, q_req);
197 atomic_inc(&qdio->req_q_full);
198 zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
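zfcp_qdio_sbals_from_sg() (lines 185-198) walks a scatter-gather list and fills one buffer element per entry, chaining into further SBALs as needed; if the list does not fit into the request's SBAL quota, the touched SBALs are zeroed again, req_q_full is incremented and the call fails. A standalone sketch of the fill loop, with a plain address/length array in place of struct scatterlist and a hypothetical get_next() callback standing in for zfcp_qdio_sbale_next():

    #include <stdint.h>
    #include <stddef.h>

    struct sbale_model { uint64_t addr; uint32_t length; };
    struct sg_entry    { void *addr; uint32_t length; };

    /* One buffer element per scatter-gather entry; get_next() returns NULL
     * once the request has used up its SBAL quota. */
    static int sbals_from_sg(struct sbale_model *(*get_next)(void *ctx), void *ctx,
                             const struct sg_entry *sg, size_t nents)
    {
        for (size_t i = 0; i < nents; i++) {
            struct sbale_model *sbale = get_next(ctx);

            if (!sbale)
                return -1;   /* the driver zeroes the SBALs and returns -EINVAL */
            sbale->addr = (uint64_t)(uintptr_t)sg[i].addr;
            sbale->length = sg[i].length;
        }
        return 0;
    }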
208 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
210 if (atomic_read(&qdio->req_q_free) ||
211 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
218 * @qdio: pointer to struct zfcp_qdio
226 int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
230 ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
231 zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
233 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
240 atomic_inc(&qdio->req_q_full);
242 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
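zfcp_qdio_sbal_get() (lines 226-242) sleeps under req_q_lock until zfcp_qdio_sbal_check() reports either a free SBAL or a downed QDIO path, with a 5-second timeout; a timeout is treated as a hanging outbound queue and triggers an adapter reopen. The return-value handling follows the usual wait_event_*_timeout convention (positive: condition met, zero: timeout, negative: interrupted). A small standalone sketch of that classification, without the actual wait or the recovery call:

    #include <errno.h>

    /* 'ret' models the result of the wait; 'qdio_up' models the
     * ZFCP_STATUS_ADAPTER_QDIOUP check done after waking up. */
    static int sbal_get_result(long ret, int qdio_up)
    {
        if (!qdio_up)
            return -EIO;        /* QDIO went down while we were waiting */
        if (ret > 0)
            return 0;           /* a free SBAL is available */
        if (ret == 0)
            return -EIO;        /* timeout: the driver also bumps req_q_full
                                 * and reopens the adapter at this point */
        return -EIO;            /* interrupted by a signal */
    }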
250 * @qdio: pointer to struct zfcp_qdio
254 int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
259 spin_lock(&qdio->stat_lock);
260 zfcp_qdio_account(qdio);
261 spin_unlock(&qdio->stat_lock);
263 atomic_sub(sbal_number, &qdio->req_q_free);
265 retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
270 atomic_add(sbal_number, &qdio->req_q_free);
271 zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
277 qdio->req_q_idx += sbal_number;
278 qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
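zfcp_qdio_send() (lines 254-278) charges the request's SBALs against req_q_free before submitting them with do_QDIO(); on failure the SBALs are zeroed and the counter restored, on success the producer index advances modulo the ring size. A standalone model of that bookkeeping, with a hypothetical submit() callback in place of do_QDIO() and without the statistics and zeroing details:

    #define QDIO_MAX_BUFFERS_PER_Q 128

    struct req_ring {
        int req_q_idx;    /* next SBAL index handed to the hardware */
        int req_q_free;   /* models the atomic req_q_free */
    };

    /* Reserve the SBALs, submit, then either roll back or advance the
     * producer index.  'submit' returns 0 on success. */
    static int send_sbals(struct req_ring *q, int sbal_first, int sbal_number,
                          int (*submit)(int first, int count))
    {
        q->req_q_free -= sbal_number;

        if (submit(sbal_first, sbal_number)) {
            q->req_q_free += sbal_number;   /* the driver also zeroes the SBALs */
            return -1;
        }

        q->req_q_idx += sbal_number;
        q->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;
        return 0;
    }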
285 * @qdio: pointer to struct zfcp_qdio
289 static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
293 ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
297 ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
301 init_waitqueue_head(&qdio->req_q_wq);
303 ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
310 qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
312 qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
317 * zfcp_qdio_close - close qdio queues for an adapter
318 * @qdio: pointer to struct zfcp_qdio
320 void zfcp_qdio_close(struct zfcp_qdio *qdio)
322 struct zfcp_adapter *adapter = qdio->adapter;
329 spin_lock_irq(&qdio->req_q_lock);
331 spin_unlock_irq(&qdio->req_q_lock);
333 wake_up(&qdio->req_q_wq);
338 count = atomic_read(&qdio->req_q_free);
340 idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
342 zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
344 qdio->req_q_idx = 0;
345 atomic_set(&qdio->req_q_free, 0);
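zfcp_qdio_close() (lines 320-345) clears the QDIOUP flag under req_q_lock so no new caller starts waiting, wakes anyone already sleeping on req_q_wq, shuts the queues down and finally zeroes the outbound SBALs that were still owned by the hardware before resetting the counters. A small standalone sketch of how that outstanding range is derived from the producer index and the free count (the actual zeroing is the same ring-clearing loop as in the completion sketch above):

    #define QDIO_MAX_BUFFERS_PER_Q 128

    /* Given the producer index and the number of free SBALs, return how many
     * SBALs were still outstanding and store their wrapped start index. */
    static int outstanding_range(int req_q_idx, int req_q_free, int *start)
    {
        if (req_q_free >= QDIO_MAX_BUFFERS_PER_Q) {
            *start = 0;
            return 0;                     /* nothing was outstanding */
        }
        *start = (req_q_idx + req_q_free) % QDIO_MAX_BUFFERS_PER_Q;
        return QDIO_MAX_BUFFERS_PER_Q - req_q_free;
    }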
349 const struct zfcp_qdio *const qdio)
356 shost->sg_tablesize = qdio->max_sbale_per_req;
357 shost->max_sectors = qdio->max_sbale_per_req * 8;
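zfcp_qdio_shost_update() (lines 349-357) propagates the QDIO limits to the SCSI host: sg_tablesize is the number of buffer elements a single request may use, and max_sectors multiplies that by 8 on the assumption that each element addresses at most one 4 KiB page, i.e. eight 512-byte sectors. A quick standalone check of that arithmetic, with a made-up max_sbale_per_req value (the real value is derived at lines 414/415):

    #include <stdio.h>

    #define SECTORS_PER_PAGE 8   /* 4096-byte page / 512-byte sector */

    int main(void)
    {
        unsigned int max_sbale_per_req = 512;   /* illustrative value only */
        unsigned int sg_tablesize = max_sbale_per_req;
        unsigned int max_sectors  = max_sbale_per_req * SECTORS_PER_PAGE;

        printf("sg_tablesize=%u max_sectors=%u (%u KiB per request)\n",
               sg_tablesize, max_sectors, max_sectors / 2);
        return 0;
    }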
362 * @qdio: pointer to struct zfcp_qdio
365 int zfcp_qdio_open(struct zfcp_qdio *qdio)
367 struct qdio_buffer **input_sbals[1] = {qdio->res_q};
368 struct qdio_buffer **output_sbals[1] = {qdio->req_q};
371 struct zfcp_adapter *adapter = qdio->adapter;
380 &qdio->adapter->status);
390 init_data.int_parm = (unsigned long) qdio;
404 &qdio->adapter->status);
408 qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
411 qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
414 qdio->max_sbale_per_req =
415 ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
421 sbale = &(qdio->res_q[cc]->element[0]);
432 qdio->req_q_idx = 0;
433 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
434 atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
436 zfcp_qdio_shost_update(adapter, qdio);
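The open path (lines 365-436) fills in the qdio initialization data, establishes and activates the queues, primes the response buffers (line 421 onward) and only then marks the adapter QDIOUP with a completely free request queue. It also derives the per-adapter element limits: either every element of an SBAL is usable or one is reserved (lines 408/411), and the per-request limit at lines 414/415 is scaled from that. A tiny model of the capability-dependent choice; the has_multi_buffer flag name is illustrative, the exact feature check is not visible in this listing, and 16 is the assumed value of QDIO_MAX_ELEMENTS_PER_BUFFER:

    #define QDIO_MAX_ELEMENTS_PER_BUFFER 16   /* assumed value of the QDIO constant */

    /* Adapters with the richer queue format may use every element of an
     * SBAL; otherwise one element per SBAL is lost. */
    static int max_sbale_per_sbal(int has_multi_buffer)
    {
        return has_multi_buffer ? QDIO_MAX_ELEMENTS_PER_BUFFER
                                : QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
    }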
448 void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
450 if (!qdio)
453 if (qdio->adapter->ccw_device)
454 qdio_free(qdio->adapter->ccw_device);
456 qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
457 qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
458 kfree(qdio);
463 struct zfcp_qdio *qdio;
465 qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
466 if (!qdio)
469 qdio->adapter = adapter;
471 if (zfcp_qdio_allocate(qdio)) {
472 kfree(qdio);
476 spin_lock_init(&qdio->req_q_lock);
477 spin_lock_init(&qdio->stat_lock);
479 adapter->qdio = qdio;
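zfcp_qdio_setup() (lines 463-479) allocates the zfcp_qdio structure, lets zfcp_qdio_allocate() (lines 289-312) grab the two SBAL rings and the QDIO data structures, frees everything again if any step fails, then initializes the locks and links the result into the adapter. A standalone sketch of the same allocate-or-unwind shape, using plain calloc()/free() in place of the kernel and QDIO allocators:

    #include <stdlib.h>

    struct qdio_ctx {
        void *req_q;   /* outbound SBAL ring */
        void *res_q;   /* inbound SBAL ring */
    };

    /* Every failure path releases exactly what was acquired so far and
     * reports the error to the caller, mirroring the driver's unwind. */
    static struct qdio_ctx *qdio_setup(size_t ring_bytes)
    {
        struct qdio_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
            return NULL;

        ctx->req_q = calloc(1, ring_bytes);
        if (!ctx->req_q)
            goto free_ctx;

        ctx->res_q = calloc(1, ring_bytes);
        if (!ctx->res_q)
            goto free_req_q;

        return ctx;

    free_req_q:
        free(ctx->req_q);
    free_ctx:
        free(ctx);
        return NULL;
    }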
489 * triggered once before going through qdio shutdown.
491 * The triggers are always run from qdio tasklet context, so no