Lines Matching defs:ccp (drivers/crypto/ccp/ccp-dev-v5.c, Linux kernel CCP v5 driver)

15 #include <linux/ccp.h>
17 #include "ccp-dev.h"
27 struct ccp_device *ccp;
42 ccp = cmd_q->ccp;
44 mutex_lock(&ccp->sb_mutex);
46 start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
51 bitmap_set(ccp->lsbmap, start, count);
53 mutex_unlock(&ccp->sb_mutex);
57 ccp->sb_avail = 0;
59 mutex_unlock(&ccp->sb_mutex);
62 if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
81 struct ccp_device *ccp = cmd_q->ccp;
83 mutex_lock(&ccp->sb_mutex);
84 bitmap_clear(ccp->lsbmap, start, count);
85 ccp->sb_avail = 1;
86 mutex_unlock(&ccp->sb_mutex);
87 wake_up_interruptible_all(&ccp->sb_queue);
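
The matches above come from the shared LSB slot allocator, ccp_lsb_alloc() and ccp_lsb_free(): a bitmap of slots guarded by ccp->sb_mutex, with allocators sleeping on ccp->sb_queue until a free run appears. A minimal sketch of that pattern follows, reassembled from the matched calls; the loop structure, the bound check, and the constants MAX_LSB_CNT/LSB_SIZE (taken from ccp-dev.h) are assumptions, as is the omission of the queue-private fast path the real allocator tries first.

#include <linux/bitmap.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include "ccp-dev.h"	/* struct ccp_device, LSB_SIZE, MAX_LSB_CNT */

/* Sketch only: allocate 'count' contiguous slots from the shared LSB bitmap. */
static u32 lsb_slots_alloc(struct ccp_device *ccp, unsigned int count)
{
	int start;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
							MAX_LSB_CNT * LSB_SIZE,
							0, count, 0);
		if (start < MAX_LSB_CNT * LSB_SIZE) {
			bitmap_set(ccp->lsbmap, start, count);
			mutex_unlock(&ccp->sb_mutex);
			return start;
		}

		/* No free run: note the shortage and sleep until a free. */
		ccp->sb_avail = 0;
		mutex_unlock(&ccp->sb_mutex);

		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;	/* interrupted while waiting */
	}
}

/* Sketch only: return slots and wake any allocator sleeping above. */
static void lsb_slots_free(struct ccp_device *ccp, unsigned int start,
			   unsigned int count)
{
	mutex_lock(&ccp->sb_mutex);
	bitmap_clear(ccp->lsbmap, start, count);
	ccp->sb_avail = 1;
	mutex_unlock(&ccp->sb_mutex);
	wake_up_interruptible_all(&ccp->sb_queue);
}
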
265 ccp_log_error(cmd_q->ccp,
602 dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
608 static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
627 for (i = 0; i < ccp->cmd_q_count; i++) {
628 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
643 dev_dbg(ccp->dev,
665 static int ccp_assign_lsbs(struct ccp_device *ccp)
677 for (i = 0; i < ccp->cmd_q_count; i++)
679 lsb_pub, ccp->cmd_q[i].lsbmask,
684 if (n_lsbs >= ccp->cmd_q_count) {
694 rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
712 bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
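
The ccp_assign_lsbs() and ccp_find_and_assign_lsb_to_q() matches show the driver building bitmaps from each queue's LSB-access mask and comparing the resulting count against ccp->cmd_q_count to decide between private and shared LSB regions. The illustration below is a hypothetical helper: the choice of bitmap_or()/bitmap_weight() is an assumption inferred from the matched arguments (lsb_pub and ccp->cmd_q[i].lsbmask), and MAX_LSB_CNT comes from ccp-dev.h rather than the matches.

#include <linux/bitmap.h>

#include "ccp-dev.h"	/* struct ccp_device, MAX_LSB_CNT */

/*
 * Illustration only: union every queue's LSB access mask and count how
 * many regions that union covers.  If the count is at least
 * ccp->cmd_q_count, each queue can be given a private LSB region;
 * otherwise some regions must be shared.
 */
static unsigned int count_reachable_lsbs(struct ccp_device *ccp)
{
	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
	unsigned int i;

	bitmap_zero(lsb_pub, MAX_LSB_CNT);

	for (i = 0; i < ccp->cmd_q_count; i++)
		bitmap_or(lsb_pub, lsb_pub,
			  ccp->cmd_q[i].lsbmask, MAX_LSB_CNT);

	return bitmap_weight(lsb_pub, MAX_LSB_CNT);
}
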
720 static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
724 for (i = 0; i < ccp->cmd_q_count; i++)
725 iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
728 static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
732 for (i = 0; i < ccp->cmd_q_count; i++)
733 iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
738 struct ccp_device *ccp = (struct ccp_device *)data;
742 for (i = 0; i < ccp->cmd_q_count; i++) {
743 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
763 ccp5_enable_queue_interrupts(ccp);
768 struct ccp_device *ccp = (struct ccp_device *)data;
770 ccp5_disable_queue_interrupts(ccp);
771 ccp->total_interrupts++;
772 if (ccp->use_tasklet)
773 tasklet_schedule(&ccp->irq_tasklet);
775 ccp5_irq_bh((unsigned long)ccp);
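
The ccp5_irq_handler()/ccp5_irq_bh() matches describe a classic top half/bottom half split: the hard IRQ masks the per-queue interrupt enables, counts the interrupt, and either schedules the tasklet or runs the bottom half inline, while the bottom half walks the command queues and re-enables interrupts when done. Below is a sketch of the top half; the irqreturn_t prototype, the else branch, and the return value are assumptions based on the standard kernel IRQ convention, and the helpers it calls are the ones shown in the matches above.

#include <linux/interrupt.h>

#include "ccp-dev.h"	/* struct ccp_device */

/* Sketch of the hard-IRQ top half implied by the matches above. */
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;

	/* Mask per-queue interrupts until the bottom half has run. */
	ccp5_disable_queue_interrupts(ccp);
	ccp->total_interrupts++;

	if (ccp->use_tasklet)
		tasklet_schedule(&ccp->irq_tasklet);
	else
		ccp5_irq_bh((unsigned long)ccp);	/* run inline */

	return IRQ_HANDLED;
}

Masking in the top half and re-enabling at the end of the bottom half (as the ccp5_enable_queue_interrupts() match shows) keeps the hard-IRQ path short and avoids re-entering the queue scan.
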
779 static int ccp5_init(struct ccp_device *ccp)
781 struct device *dev = ccp->dev;
791 qmr = ioread32(ccp->io_regs + Q_MASK_REG);
800 dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n");
804 for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
810 ccp->name, i);
820 cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
821 ccp->cmd_q_count++;
823 cmd_q->ccp = ccp;
844 cmd_q->reg_control = ccp->io_regs +
867 if (ccp->cmd_q_count == 0) {
874 ccp5_disable_queue_interrupts(ccp);
875 for (i = 0; i < ccp->cmd_q_count; i++) {
876 cmd_q = &ccp->cmd_q[i];
890 ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
896 if (ccp->use_tasklet)
897 tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
898 (unsigned long)ccp);
902 status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
903 status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
904 iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
905 iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
910 for (i = 0; i < ccp->cmd_q_count; i++) {
914 cmd_q = &ccp->cmd_q[i];
934 ret = ccp_assign_lsbs(ccp);
941 for (i = 0; i < ccp->cmd_q_count; i++) {
942 ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
943 ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
948 for (i = 0; i < ccp->cmd_q_count; i++) {
951 cmd_q = &ccp->cmd_q[i];
954 "%s-q%u", ccp->name, cmd_q->id);
967 ccp5_enable_queue_interrupts(ccp);
971 ccp_add_device(ccp);
973 ret = ccp_register_rng(ccp);
978 ret = ccp_dmaengine_register(ccp);
984 ccp5_debugfs_setup(ccp);
990 ccp_unregister_rng(ccp);
993 for (i = 0; i < ccp->cmd_q_count; i++)
994 if (ccp->cmd_q[i].kthread)
995 kthread_stop(ccp->cmd_q[i].kthread);
998 sp_free_ccp_irq(ccp->sp, ccp);
1001 for (i = 0; i < ccp->cmd_q_count; i++)
1002 dma_pool_destroy(ccp->cmd_q[i].dma_pool);
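
Within ccp5_init(), the "%s-q%u" match comes from creating one worker kthread per command queue. A minimal sketch of that pattern under standard kthread usage follows; the helper name is hypothetical, and ccp_cmd_queue_thread (the per-queue thread function from the driver's common code) does not itself appear in the matches.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "ccp-dev.h"	/* struct ccp_device, struct ccp_cmd_queue */

/* Sketch only: spawn one named worker thread per command queue. */
static int ccp5_start_queue_threads(struct ccp_device *ccp)
{
	struct task_struct *kthread;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread))
			return PTR_ERR(kthread);

		cmd_q->kthread = kthread;
		wake_up_process(kthread);	/* start servicing the queue */
	}

	return 0;
}
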
1007 static void ccp5_destroy(struct ccp_device *ccp)
1014 ccp_dmaengine_unregister(ccp);
1017 ccp_unregister_rng(ccp);
1020 ccp_del_device(ccp);
1031 ccp5_disable_queue_interrupts(ccp);
1032 for (i = 0; i < ccp->cmd_q_count; i++) {
1033 cmd_q = &ccp->cmd_q[i];
1045 for (i = 0; i < ccp->cmd_q_count; i++)
1046 if (ccp->cmd_q[i].kthread)
1047 kthread_stop(ccp->cmd_q[i].kthread);
1049 sp_free_ccp_irq(ccp->sp, ccp);
1052 while (!list_empty(&ccp->cmd)) {
1054 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
1058 while (!list_empty(&ccp->backlog)) {
1060 cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
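
In ccp5_destroy(), the two list_first_entry() matches drain the pending-command and backlog lists on teardown. A hedged sketch of that drain loop follows; only the list heads and the list_first_entry() usage come from the matches, while the list_del() and the -ENODEV completion callback are assumptions about how such teardown loops are usually finished.

#include <linux/ccp.h>		/* struct ccp_cmd */
#include <linux/errno.h>
#include <linux/list.h>

#include "ccp-dev.h"	/* struct ccp_device */

/* Sketch only: complete every queued command with an error on teardown. */
static void ccp5_flush_pending(struct ccp_device *ccp)
{
	struct ccp_cmd *cmd;

	while (!list_empty(&ccp->cmd)) {
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}

	while (!list_empty(&ccp->backlog)) {
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}
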
1066 static void ccp5_config(struct ccp_device *ccp)
1069 iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
1072 static void ccp5other_config(struct ccp_device *ccp)
1079 iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
1080 iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
1082 rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
1083 iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
1086 iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
1087 iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
1088 iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);
1090 iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
1091 iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
1093 iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);
1095 ccp5_config(ccp);