Lines matching defs:ccp

15 #include <linux/ccp.h>
17 #include "ccp-dev.h"
22 struct ccp_device *ccp = cmd_q->ccp;
25 mutex_lock(&ccp->sb_mutex);
27 start = (u32)bitmap_find_next_zero_area(ccp->sb,
28 ccp->sb_count,
29 ccp->sb_start,
31 if (start <= ccp->sb_count) {
32 bitmap_set(ccp->sb, start, count);
34 mutex_unlock(&ccp->sb_mutex);
38 ccp->sb_avail = 0;
40 mutex_unlock(&ccp->sb_mutex);
43 if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
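The fragments above trace the key storage block (KSB) slot allocator: under sb_mutex it searches the ccp->sb bitmap for a run of free slots, reserves the run, and otherwise clears sb_avail and sleeps on sb_queue until a free is signalled. A minimal sketch of how those pieces fit together, assuming the structure layouts from ccp-dev.h; the function name ccp_alloc_ksb and the return-0-on-interrupt convention are assumptions, not shown in the matched lines:

    static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
    {
            int start;
            struct ccp_device *ccp = cmd_q->ccp;

            for (;;) {
                    mutex_lock(&ccp->sb_mutex);

                    /* Look for a free run of 'count' slots above sb_start */
                    start = (u32)bitmap_find_next_zero_area(ccp->sb,
                                                            ccp->sb_count,
                                                            ccp->sb_start,
                                                            count, 0);
                    if (start <= ccp->sb_count) {
                            /* Found one: reserve it and stop searching */
                            bitmap_set(ccp->sb, start, count);
                            mutex_unlock(&ccp->sb_mutex);
                            break;
                    }

                    /* No room: note that and sleep until a free is signalled */
                    ccp->sb_avail = 0;
                    mutex_unlock(&ccp->sb_mutex);

                    if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                            return 0;       /* interrupted; 0 marks failure (assumption) */
            }

            return KSB_START + start;
    }

The wait queue avoids busy-polling the bitmap: an allocator that finds no room parks itself and is only woken once a free path sets sb_avail.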
53 struct ccp_device *ccp = cmd_q->ccp;
58 mutex_lock(&ccp->sb_mutex);
60 bitmap_clear(ccp->sb, start - KSB_START, count);
62 ccp->sb_avail = 1;
64 mutex_unlock(&ccp->sb_mutex);
66 wake_up_interruptible_all(&ccp->sb_queue);
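The matching free path clears the same bitmap range (adjusting for the KSB_START offset), sets sb_avail, and wakes every sleeper so each can retry its search. A sketch under the same assumptions; the name ccp_free_ksb and the !start guard are not in the matched lines:

    static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
                             unsigned int count)
    {
            struct ccp_device *ccp = cmd_q->ccp;

            /* Nothing to do if the caller never got an allocation */
            if (!start)
                    return;

            mutex_lock(&ccp->sb_mutex);

            /* 'start' is KSB-relative, the bitmap is 0-based */
            bitmap_clear(ccp->sb, start - KSB_START, count);

            /* Let sleeping allocators retry their bitmap search */
            ccp->sb_avail = 1;

            mutex_unlock(&ccp->sb_mutex);

            wake_up_interruptible_all(&ccp->sb_queue);
    }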
77 struct ccp_device *ccp = cmd_q->ccp;
101 cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
103 mutex_lock(&ccp->req_mutex);
111 iowrite32(cr0, ccp->io_regs + CMD_REQ0);
113 mutex_unlock(&ccp->req_mutex);
124 ccp_log_error(cmd_q->ccp,
127 iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
137 iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
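The ccp_do_cmd fragments show command submission serialized on req_mutex: the CR words are written at fixed increments above CMD_REQ0, and CMD_REQ0 itself is written last to start the job (the DEL_CMD_Q_JOB writes further down use the same register block to halt a queue). A hypothetical helper sketching that ordering; the helper name, cr0/cr[]/cr_count, and the wmb() barrier are assumptions, not shown above:

    /* Sketch: push one command's CR words to the engine under req_mutex.
     * cr0 is the first CR word, cr[] holds the remaining cr_count words. */
    static void ccp_write_cmd_regs(struct ccp_device *ccp, u32 cr0,
                                   const u32 *cr, unsigned int cr_count)
    {
            void __iomem *cr_addr;
            unsigned int i;

            mutex_lock(&ccp->req_mutex);

            /* CR words 1..n live at fixed increments above CMD_REQ0 */
            cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
            for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
                    iowrite32(cr[i], cr_addr);

            /* Post the CR words before the kick-off write (assumption) */
            wmb();

            /* Writing CMD_REQ0 last tells the CCP to start the job */
            iowrite32(cr0, ccp->io_regs + CMD_REQ0);

            mutex_unlock(&ccp->req_mutex);
    }

Writing the kick-off register last keeps the engine from seeing a partially filled request.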
314 static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
316 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
319 static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
321 iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
326 struct ccp_device *ccp = (struct ccp_device *)data;
331 status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
333 for (i = 0; i < ccp->cmd_q_count; i++) {
334 cmd_q = &ccp->cmd_q[i];
349 iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
353 ccp_enable_queue_interrupts(ccp);
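ccp_irq_bh is the bottom half: it reads the combined IRQ_STATUS_REG once, checks each command queue's int_ok/int_err bits, acknowledges just that queue's bits, and finally re-arms the queue interrupts that the top half masked. A sketch; the per-queue bookkeeping between the read and the acknowledge is elided, and the local variable names are assumptions:

    static void ccp_irq_bh(unsigned long data)
    {
            struct ccp_device *ccp = (struct ccp_device *)data;
            struct ccp_cmd_queue *cmd_q;
            u32 q_int, status;
            unsigned int i;

            status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

            for (i = 0; i < ccp->cmd_q_count; i++) {
                    cmd_q = &ccp->cmd_q[i];

                    /* Did this queue raise its "done" or "error" bit? */
                    q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                    if (q_int) {
                            /* ... record status for the queue's kthread ... */

                            /* Acknowledge only this queue's bits */
                            iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                    }
            }

            ccp_enable_queue_interrupts(ccp);
    }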
358 struct ccp_device *ccp = (struct ccp_device *)data;
360 ccp_disable_queue_interrupts(ccp);
361 if (ccp->use_tasklet)
362 tasklet_schedule(&ccp->irq_tasklet);
364 ccp_irq_bh((unsigned long)ccp);
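The top half stays minimal: mask the queue interrupts, then defer the real work either to the tasklet (when use_tasklet is set) or by calling the bottom half directly. A sketch; the else branch and the IRQ_HANDLED return are implied rather than shown in the matched lines:

    static irqreturn_t ccp_irq_handler(int irq, void *data)
    {
            struct ccp_device *ccp = (struct ccp_device *)data;

            /* Mask further queue interrupts, then hand off the heavy lifting */
            ccp_disable_queue_interrupts(ccp);
            if (ccp->use_tasklet)
                    tasklet_schedule(&ccp->irq_tasklet);
            else
                    ccp_irq_bh((unsigned long)ccp);  /* run the bottom half inline */

            return IRQ_HANDLED;
    }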
369 static int ccp_init(struct ccp_device *ccp)
371 struct device *dev = ccp->dev;
379 ccp->qim = 0;
380 qmr = ioread32(ccp->io_regs + Q_MASK_REG);
381 for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
387 ccp->name, i);
397 cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
398 ccp->cmd_q_count++;
400 cmd_q->ccp = ccp;
405 cmd_q->sb_key = KSB_START + ccp->sb_start++;
406 cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
407 ccp->sb_count -= 2;
412 cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
414 cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
424 ccp->qim |= cmd_q->int_ok | cmd_q->int_err;
428 iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
434 if (ccp->cmd_q_count == 0) {
439 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
442 ccp_disable_queue_interrupts(ccp);
443 for (i = 0; i < ccp->cmd_q_count; i++) {
444 cmd_q = &ccp->cmd_q[i];
449 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
452 ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
459 if (ccp->use_tasklet)
460 tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
461 (unsigned long)ccp);
465 for (i = 0; i < ccp->cmd_q_count; i++) {
468 cmd_q = &ccp->cmd_q[i];
471 "%s-q%u", ccp->name, cmd_q->id);
485 ccp_enable_queue_interrupts(ccp);
488 ccp_add_device(ccp);
490 ret = ccp_register_rng(ccp);
495 ret = ccp_dmaengine_register(ccp);
502 ccp_unregister_rng(ccp);
505 for (i = 0; i < ccp->cmd_q_count; i++)
506 if (ccp->cmd_q[i].kthread)
507 kthread_stop(ccp->cmd_q[i].kthread);
509 sp_free_ccp_irq(ccp->sp, ccp);
512 for (i = 0; i < ccp->cmd_q_count; i++)
513 dma_pool_destroy(ccp->cmd_q[i].dma_pool);
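The tail of ccp_init starts one worker kthread per discovered command queue, re-enables interrupts, and registers the device with the RNG and dmaengine layers; any failure unwinds in reverse. A sketch of that tail; ccp_cmd_queue_thread (the queue worker) and the label names are assumptions, and the real unwind may be split across more labels than shown here:

            /* Start a worker kthread per initialized command queue */
            for (i = 0; i < ccp->cmd_q_count; i++) {
                    struct task_struct *kthread;

                    cmd_q = &ccp->cmd_q[i];

                    kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                             "%s-q%u", ccp->name, cmd_q->id);
                    if (IS_ERR(kthread)) {
                            ret = PTR_ERR(kthread);
                            goto e_kthread;
                    }

                    cmd_q->kthread = kthread;
                    wake_up_process(kthread);
            }

            ccp_enable_queue_interrupts(ccp);

            /* Make the device visible, then hook up hwrng and dmaengine */
            ccp_add_device(ccp);

            ret = ccp_register_rng(ccp);
            if (ret)
                    goto e_kthread;

            ret = ccp_dmaengine_register(ccp);
            if (ret)
                    goto e_hwrng;

            return 0;

    e_hwrng:
            ccp_unregister_rng(ccp);

    e_kthread:
            /* Unwind in reverse: stop kthreads, release the IRQ, free DMA pools */
            for (i = 0; i < ccp->cmd_q_count; i++)
                    if (ccp->cmd_q[i].kthread)
                            kthread_stop(ccp->cmd_q[i].kthread);

            sp_free_ccp_irq(ccp->sp, ccp);

            for (i = 0; i < ccp->cmd_q_count; i++)
                    dma_pool_destroy(ccp->cmd_q[i].dma_pool);

            return ret;
    }

Tearing down in the same order as ccp_destroy below keeps the error path and the normal teardown consistent.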
518 static void ccp_destroy(struct ccp_device *ccp)
525 ccp_dmaengine_unregister(ccp);
528 ccp_unregister_rng(ccp);
531 ccp_del_device(ccp);
534 ccp_disable_queue_interrupts(ccp);
535 for (i = 0; i < ccp->cmd_q_count; i++) {
536 cmd_q = &ccp->cmd_q[i];
541 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
544 for (i = 0; i < ccp->cmd_q_count; i++)
545 if (ccp->cmd_q[i].kthread)
546 kthread_stop(ccp->cmd_q[i].kthread);
548 sp_free_ccp_irq(ccp->sp, ccp);
550 for (i = 0; i < ccp->cmd_q_count; i++)
551 dma_pool_destroy(ccp->cmd_q[i].dma_pool);
554 while (!list_empty(&ccp->cmd)) {
556 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
560 while (!list_empty(&ccp->backlog)) {
562 cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
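The final matches drain the pending command list and the backlog in ccp_destroy; the usual pattern is to pop each struct ccp_cmd and complete it with an error so no submitter is left waiting on a dead device. A sketch following that pattern; list_del(), the callback/data fields, and the -ENODEV status are assumptions, not shown in the matched lines:

            struct ccp_cmd *cmd;

            /* Complete anything still queued with an error status */
            while (!list_empty(&ccp->cmd)) {
                    cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                    list_del(&cmd->entry);
                    cmd->callback(cmd->data, -ENODEV);
            }

            /* ...and the same for commands that were backlogged */
            while (!list_empty(&ccp->backlog)) {
                    cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                    list_del(&cmd->entry);
                    cmd->callback(cmd->data, -ENODEV);
            }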