Lines Matching defs:ring
8 * extension to Broadcom FlexRM ring manager. The FlexRM ring
13 * rings where each mailbox channel represents a separate FlexRM ring.
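The header comment above describes the driver as a mailbox controller in which every channel is backed by one FlexRM ring. For context, a minimal client-side sketch is shown below; the context struct, callback, and channel index are illustrative assumptions, while the mailbox client calls and the Broadcom message type (struct brcm_message) are the interfaces such a client would actually use:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>

struct my_flexrm_user {			/* hypothetical client context */
	struct mbox_client client;
	struct mbox_chan *chan;
};

static void my_rx_callback(struct mbox_client *cl, void *data)
{
	struct brcm_message *msg = data;

	/* FlexRM reports per-request completion status in msg->error */
	dev_dbg(cl->dev, "completion received, error=%d\n", msg->error);
}

static int my_flexrm_user_init(struct my_flexrm_user *u, struct device *dev)
{
	u->client.dev = dev;
	u->client.rx_callback = my_rx_callback;
	u->client.tx_block = false;

	/* index 0 selects the first "mboxes" entry, i.e. one FlexRM ring */
	u->chan = mbox_request_channel(&u->client, 0);
	return PTR_ERR_OR_ZERO(u->chan);
}

/* Later: mbox_send_message(u->chan, &some_brcm_message); */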
128 /* ====== FlexRM ring descriptor defines ====== */
292 /* ====== FlexRM ring descriptor helper routines ====== */
924 struct flexrm_ring *ring;
931 ring = &mbox->rings[i];
932 if (readl(ring->regs + RING_CONTROL) &
939 ring->num, state,
940 (unsigned long long)ring->bd_dma_base,
942 (unsigned long long)ring->cmpl_dma_base,
952 struct flexrm_ring *ring;
959 ring = &mbox->rings[i];
960 bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
961 val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
964 ring->bd_dma_base);
966 ring->num,
968 (u32)ring->bd_write_offset,
969 (u32)ring->cmpl_read_offset,
970 (u32)atomic_read(&ring->msg_send_count),
971 (u32)atomic_read(&ring->msg_cmpl_count));
975 static int flexrm_new_request(struct flexrm_ring *ring,
992 spin_lock_irqsave(&ring->lock, flags);
993 reqid = bitmap_find_free_region(ring->requests_bmap,
995 spin_unlock_irqrestore(&ring->lock, flags);
998 ring->requests[reqid] = msg;
1001 ret = flexrm_dma_map(ring->mbox->dev, msg);
1003 ring->requests[reqid] = NULL;
1004 spin_lock_irqsave(&ring->lock, flags);
1005 bitmap_release_region(ring->requests_bmap, reqid, 0);
1006 spin_unlock_irqrestore(&ring->lock, flags);
1011 read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
1012 val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
1014 read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
1025 write_offset = ring->bd_write_offset;
1027 if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
1041 /* Write descriptors to ring */
1043 ring->bd_base + ring->bd_write_offset,
1044 RING_BD_TOGGLE_VALID(ring->bd_write_offset),
1045 ring->bd_base, ring->bd_base + RING_BD_SIZE);
1052 /* Save ring BD write offset */
1053 ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
1056 atomic_inc_return(&ring->msg_send_count);
1064 flexrm_dma_unmap(ring->mbox->dev, msg);
1065 ring->requests[reqid] = NULL;
1066 spin_lock_irqsave(&ring->lock, flags);
1067 bitmap_release_region(ring->requests_bmap, reqid, 0);
1068 spin_unlock_irqrestore(&ring->lock, flags);
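The flexrm_new_request() lines above follow one bookkeeping idiom throughout: reserve a request ID from a bitmap while holding the ring spinlock, stash the message under that ID, and release both on every failure path. A standalone sketch of that reserve/rollback pattern, using hypothetical names (struct my_req_tracker, MY_MAX_REQ) instead of the driver's own, could look like this:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define MY_MAX_REQ	1024	/* assumption; the driver uses RING_MAX_REQ_COUNT */

struct my_req_tracker {			/* hypothetical, trimmed-down ring state */
	spinlock_t lock;
	DECLARE_BITMAP(requests_bmap, MY_MAX_REQ);
	void *requests[MY_MAX_REQ];
};

static int my_reserve_reqid(struct my_req_tracker *t, void *msg)
{
	unsigned long flags;
	int reqid;

	spin_lock_irqsave(&t->lock, flags);
	/* order 0 reserves exactly one bit */
	reqid = bitmap_find_free_region(t->requests_bmap, MY_MAX_REQ, 0);
	spin_unlock_irqrestore(&t->lock, flags);
	if (reqid < 0)
		return -ENOSPC;

	t->requests[reqid] = msg;
	return reqid;
}

static void my_release_reqid(struct my_req_tracker *t, int reqid)
{
	unsigned long flags;

	t->requests[reqid] = NULL;
	spin_lock_irqsave(&t->lock, flags);
	bitmap_release_region(t->requests_bmap, reqid, 0);
	spin_unlock_irqrestore(&t->lock, flags);
}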
1074 static int flexrm_process_completions(struct flexrm_ring *ring)
1081 struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
1083 spin_lock_irqsave(&ring->lock, flags);
1093 cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1095 cmpl_read_offset = ring->cmpl_read_offset;
1096 ring->cmpl_read_offset = cmpl_write_offset;
1098 spin_unlock_irqrestore(&ring->lock, flags);
1104 desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
1114 dev_warn(ring->mbox->dev,
1115 "ring%d got completion desc=0x%lx with error %d\n",
1116 ring->num, (unsigned long)desc, err);
1123 msg = ring->requests[reqid];
1125 dev_warn(ring->mbox->dev,
1126 "ring%d null msg pointer for completion desc=0x%lx\n",
1127 ring->num, (unsigned long)desc);
1132 ring->requests[reqid] = NULL;
1133 spin_lock_irqsave(&ring->lock, flags);
1134 bitmap_release_region(ring->requests_bmap, reqid, 0);
1135 spin_unlock_irqrestore(&ring->lock, flags);
1138 flexrm_dma_unmap(ring->mbox->dev, msg);
1145 atomic_inc_return(&ring->msg_cmpl_count);
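flexrm_process_completions() drains completions by snapshotting the hardware write pointer once (under the ring lock in the real code) and then walking its cached read offset up to that snapshot, wrapping at the end of the completion area. A simplified sketch of that drain loop, with placeholder sizes and register offsets (MY_DESC_SIZE, MY_CMPL_SIZE, MY_CMPL_WRITE_PTR) standing in for the driver's RING_* constants, might be:

#include <linux/io.h>
#include <linux/types.h>

#define MY_DESC_SIZE		8			/* assumption: 64-bit descriptors */
#define MY_CMPL_SIZE		(1024 * MY_DESC_SIZE)	/* assumption: completion area size */
#define MY_CMPL_WRITE_PTR	0x0			/* placeholder register offset */

struct my_cmpl_ring {			/* hypothetical, trimmed-down ring state */
	void __iomem *regs;
	void *cmpl_base;
	u32 cmpl_read_offset;
};

static int my_drain_completions(struct my_cmpl_ring *ring)
{
	u32 cmpl_read_offset, cmpl_write_offset;
	u64 desc;
	int count = 0;

	/* Hardware reports a descriptor index; convert it to a byte offset.
	 * The real driver takes the ring lock around this snapshot.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + MY_CMPL_WRITE_PTR);
	cmpl_write_offset *= MY_DESC_SIZE;

	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	/* Walk every descriptor the hardware has produced since last time */
	while (cmpl_read_offset != cmpl_write_offset) {
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		cmpl_read_offset += MY_DESC_SIZE;
		if (cmpl_read_offset == MY_CMPL_SIZE)
			cmpl_read_offset = 0;	/* wrap around */

		/* decode the request ID from desc and complete it here */
		count++;
	}

	return count;
}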
1196 struct flexrm_ring *ring = chan->con_priv;
1202 rc = flexrm_new_request(ring, msg,
1213 return flexrm_new_request(ring, NULL, data);
1229 struct flexrm_ring *ring = chan->con_priv;
1232 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
1233 GFP_KERNEL, &ring->bd_dma_base);
1234 if (!ring->bd_base) {
1235 dev_err(ring->mbox->dev,
1236 "can't allocate BD memory for ring%d\n",
1237 ring->num);
1247 next_addr += ring->bd_dma_base;
1253 flexrm_write_desc(ring->bd_base + off, d);
1257 ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
1258 GFP_KERNEL, &ring->cmpl_dma_base);
1259 if (!ring->cmpl_base) {
1260 dev_err(ring->mbox->dev,
1261 "can't allocate completion memory for ring%d\n",
1262 ring->num);
1268 if (ring->irq == UINT_MAX) {
1269 dev_err(ring->mbox->dev,
1270 "ring%d IRQ not available\n", ring->num);
1274 ret = request_threaded_irq(ring->irq,
1277 0, dev_name(ring->mbox->dev), ring);
1279 dev_err(ring->mbox->dev,
1280 "failed to request ring%d IRQ\n", ring->num);
1283 ring->irq_requested = true;
1286 ring->irq_aff_hint = CPU_MASK_NONE;
1287 val = ring->mbox->num_rings;
1289 cpumask_set_cpu((ring->num / val) % num_online_cpus(),
1290 &ring->irq_aff_hint);
1291 ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
1293 dev_err(ring->mbox->dev,
1294 "failed to set IRQ affinity hint for ring%d\n",
1295 ring->num);
1299 /* Disable/inactivate ring */
1300 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1303 val = BD_START_ADDR_VALUE(ring->bd_dma_base);
1304 writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
1307 ring->bd_write_offset =
1308 readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
1309 ring->bd_write_offset *= RING_DESC_SIZE;
1312 val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
1313 writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
1316 ring->cmpl_read_offset =
1317 readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1318 ring->cmpl_read_offset *= RING_DESC_SIZE;
1320 /* Read ring Tx, Rx, and Outstanding counts to clear */
1321 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
1322 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
1323 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
1324 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
1325 readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
1329 val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
1331 val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
1332 writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
1334 /* Enable/activate ring */
1336 writel_relaxed(val, ring->regs + RING_CONTROL);
1339 atomic_set(&ring->msg_send_count, 0);
1340 atomic_set(&ring->msg_cmpl_count, 0);
1345 free_irq(ring->irq, ring);
1346 ring->irq_requested = false;
1348 dma_pool_free(ring->mbox->cmpl_pool,
1349 ring->cmpl_base, ring->cmpl_dma_base);
1350 ring->cmpl_base = NULL;
1352 dma_pool_free(ring->mbox->bd_pool,
1353 ring->bd_base, ring->bd_dma_base);
1354 ring->bd_base = NULL;
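The ring startup lines above acquire resources in a fixed order (BD memory from a DMA pool, completion memory, then the ring IRQ), and the failure lines at the end unwind them in reverse. A compact sketch of that goto-unwind shape, with hypothetical pool fields and IRQ handlers, would be:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

struct my_ring {			/* hypothetical, trimmed-down ring state */
	struct dma_pool *bd_pool, *cmpl_pool;
	void *bd_base, *cmpl_base;
	dma_addr_t bd_dma_base, cmpl_dma_base;
	unsigned int irq;
};

static irqreturn_t my_irq_event(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;		/* defer the real work to the thread */
}

static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
	/* drain completions here */
	return IRQ_HANDLED;
}

static int my_ring_startup(struct my_ring *ring)
{
	int ret;

	ring->bd_base = dma_pool_alloc(ring->bd_pool, GFP_KERNEL,
				       &ring->bd_dma_base);
	if (!ring->bd_base)
		return -ENOMEM;

	ring->cmpl_base = dma_pool_zalloc(ring->cmpl_pool, GFP_KERNEL,
					  &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		ret = -ENOMEM;
		goto fail_free_bd;
	}

	ret = request_threaded_irq(ring->irq, my_irq_event, my_irq_thread,
				   0, "my-ring", ring);
	if (ret)
		goto fail_free_cmpl;

	/* ... program BD/completion base addresses and MSI coalescing,
	 * then enable the ring via its control register ...
	 */
	return 0;

fail_free_cmpl:
	dma_pool_free(ring->cmpl_pool, ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd:
	dma_pool_free(ring->bd_pool, ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
	return ret;
}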
1364 struct flexrm_ring *ring = chan->con_priv;
1366 /* Disable/inactivate ring */
1367 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1369 /* Set ring flush state */
1372 ring->regs + RING_CONTROL);
1374 if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1380 dev_err(ring->mbox->dev,
1381 "setting ring%d flush state timed out\n", ring->num);
1383 /* Clear ring flush state */
1385 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1387 if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1393 dev_err(ring->mbox->dev,
1394 "clearing ring%d flush state timed out\n", ring->num);
1398 msg = ring->requests[reqid];
1403 ring->requests[reqid] = NULL;
1406 flexrm_dma_unmap(ring->mbox->dev, msg);
1414 bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
1417 if (ring->irq_requested) {
1418 irq_update_affinity_hint(ring->irq, NULL);
1419 free_irq(ring->irq, ring);
1420 ring->irq_requested = false;
1423 /* Free up completion descriptor ring */
1424 if (ring->cmpl_base) {
1425 dma_pool_free(ring->mbox->cmpl_pool,
1426 ring->cmpl_base, ring->cmpl_dma_base);
1427 ring->cmpl_base = NULL;
1430 /* Free up BD descriptor ring */
1431 if (ring->bd_base) {
1432 dma_pool_free(ring->mbox->bd_pool,
1433 ring->bd_base, ring->bd_dma_base);
1434 ring->bd_base = NULL;
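The ring shutdown lines above use a two-phase flush: request a flush through the control register and poll FLUSH_DONE until it asserts, then clear the request and poll until it deasserts, with a bounded retry count in both phases. A self-contained sketch of that poll pattern, using placeholder register offsets and a hypothetical timeout constant, could look like:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define MY_RING_CONTROL		0x0	/* placeholder register offsets */
#define MY_RING_FLUSH_DONE	0x4
#define MY_CONTROL_FLUSH_BIT	BIT(5)	/* placeholder flush request bit */
#define MY_FLUSH_DONE_MASK	0x1
#define MY_FLUSH_TIMEOUT_MS	1000	/* hypothetical 1s budget per phase */

static int my_ring_flush(void __iomem *regs)
{
	unsigned int timeout;

	/* Request a flush and wait for the hardware to acknowledge it */
	writel_relaxed(MY_CONTROL_FLUSH_BIT, regs + MY_RING_CONTROL);
	timeout = MY_FLUSH_TIMEOUT_MS;
	do {
		if (readl_relaxed(regs + MY_RING_FLUSH_DONE) &
		    MY_FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		return -ETIMEDOUT;

	/* Clear the flush request and wait for FLUSH_DONE to drop again */
	writel_relaxed(0x0, regs + MY_RING_CONTROL);
	timeout = MY_FLUSH_TIMEOUT_MS;
	do {
		if (!(readl_relaxed(regs + MY_RING_FLUSH_DONE) &
		      MY_FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);

	return timeout ? 0 : -ETIMEDOUT;
}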
1449 struct flexrm_ring *ring;
1464 ring = chan->con_priv;
1465 ring->msi_count_threshold = pa->args[1];
1466 ring->msi_timer_val = pa->args[2];
1477 struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
1480 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
1481 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
1482 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
1491 struct flexrm_ring *ring;
1526 /* Allocate driver ring structs */
1527 ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
1528 if (!ring) {
1532 mbox->rings = ring;
1534 /* Initialize members of driver ring structs */
1537 ring = &mbox->rings[index];
1538 ring->num = index;
1539 ring->mbox = mbox;
1547 ring->regs = regs;
1549 ring->irq = UINT_MAX;
1550 ring->irq_requested = false;
1551 ring->msi_timer_val = MSI_TIMER_VAL_MASK;
1552 ring->msi_count_threshold = 0x1;
1553 memset(ring->requests, 0, sizeof(ring->requests));
1554 ring->bd_base = NULL;
1555 ring->bd_dma_base = 0;
1556 ring->cmpl_base = NULL;
1557 ring->cmpl_dma_base = 0;
1558 atomic_set(&ring->msg_send_count, 0);
1559 atomic_set(&ring->msg_cmpl_count, 0);
1560 spin_lock_init(&ring->lock);
1561 bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
1562 ring->cmpl_read_offset = 0;
1573 /* Create DMA pool for ring BD memory */
1581 /* Create DMA pool for ring completion memory */
1589 /* Allocate platform MSIs for each ring */
1595 /* Save allocated IRQ numbers for each ring */