Lines matching references to cm (the struct cm_dev * device pointer)
203 struct cm_dev *cm;
226 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
300 static void *riocm_rx_get_msg(struct cm_dev *cm)
305 msg = rio_get_inb_message(cm->mport, cmbox);
308 if (cm->rx_buf[i] == msg) {
309 cm->rx_buf[i] = NULL;
310 cm->rx_slots++;
323 * riocm_rx_fill - fills a ring of receive buffers for given cm device
324 * @cm: cm_dev object
329 static void riocm_rx_fill(struct cm_dev *cm, int nent)
333 if (cm->rx_slots == 0)
336 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
337 if (cm->rx_buf[i] == NULL) {
338 cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
339 if (cm->rx_buf[i] == NULL)
341 rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
342 cm->rx_slots--;
349 * riocm_rx_free - frees all receive buffers associated with given cm device
350 * @cm: cm_dev object
354 static void riocm_rx_free(struct cm_dev *cm)
359 if (cm->rx_buf[i] != NULL) {
360 kfree(cm->rx_buf[i]);
361 cm->rx_buf[i] = NULL;
368 * @cm: cm_dev object
376 static int riocm_req_handler(struct cm_dev *cm, void *req_data)
404 req->cmdev = cm;
483 * @cm: cm_dev object
486 static void rio_cm_handler(struct cm_dev *cm, void *data)
490 if (!rio_mport_is_running(cm->mport))
500 riocm_req_handler(cm, data);
518 * @cm: cm_dev object
526 static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
582 struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
586 if (!rio_mport_is_running(cm->mport))
590 mutex_lock(&cm->rx_lock);
591 data = riocm_rx_get_msg(cm);
593 riocm_rx_fill(cm, 1);
594 mutex_unlock(&cm->rx_lock);
611 rio_rx_data_handler(cm, data);
613 rio_cm_handler(cm, data);
620 struct cm_dev *cm = dev_id;
622 if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
623 queue_work(cm->rx_wq, &cm->rx_work);
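The rio_ibmsg_handler and mailbox-event matches above show the usual deferral pattern: the inbound-message callback does no processing itself, it only queues cm->rx_work (and skips queueing if work is already pending), while the worker drains messages under rx_lock and refills the ring. The single-threaded sketch below imitates that split with a plain flag and a counter standing in for work_pending()/queue_work() and the mailbox; all names are hypothetical.

#include <stdio.h>
#include <stdbool.h>

static bool work_pending;       /* stand-in for work_pending(&cm->rx_work) */
static int  queued_messages;    /* stand-in for the inbound mailbox */

/* Callback side: no processing here, just make sure the worker will run. */
static void inb_msg_event(void)
{
	queued_messages++;
	if (!work_pending) {
		work_pending = true;            /* queue_work() equivalent */
		printf("worker scheduled\n");
	}
}

/* Worker side: drain everything that arrived while the work was pending. */
static void rx_worker(void)
{
	while (queued_messages) {
		printf("handling one inbound message\n");
		queued_messages--;
	}
	work_pending = false;
}

int main(void)
{
	inb_msg_event();
	inb_msg_event();        /* second event rides on the already-pending work */
	rx_worker();
	return 0;
}
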
628 * @cm: cm_dev object
635 static void rio_txcq_handler(struct cm_dev *cm, int slot)
644 cm->mport->id, slot, cm->tx_cnt);
646 spin_lock(&cm->tx_lock);
647 ack_slot = cm->tx_ack_slot;
652 while (cm->tx_cnt && ((ack_slot != slot) ||
653 (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {
655 cm->tx_buf[ack_slot] = NULL;
658 cm->tx_cnt--;
661 if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
662 riocm_error("tx_cnt %d out of sync", cm->tx_cnt);
664 WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));
666 cm->tx_ack_slot = ack_slot;
671 if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
675 list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
677 cm->tx_buf[cm->tx_slot] = req->buffer;
678 rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
683 ++cm->tx_cnt;
684 ++cm->tx_slot;
685 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
686 if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
691 spin_unlock(&cm->tx_lock);
697 struct cm_dev *cm = dev_id;
699 if (cm && rio_mport_is_running(cm->mport))
700 rio_txcq_handler(cm, slot);
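The rio_txcq_handler matches above walk the outbound ring with two indices: tx_slot (next slot to post) and tx_ack_slot (oldest slot not yet completed), both wrapped with & (RIOCM_TX_RING_SIZE - 1), which works only because the ring size is a power of two; the extra tx_cnt == RIOCM_TX_RING_SIZE term in the loop lets a completely full ring, where the two indices are equal, still be drained. A small standalone sketch of the same index arithmetic, with hypothetical names, follows.

#include <stdio.h>

#define TX_RING_SIZE 16   /* power of two, so "& (TX_RING_SIZE - 1)" wraps indices */

static unsigned int tx_slot;      /* next slot to post into */
static unsigned int tx_ack_slot;  /* oldest slot not yet completed */
static unsigned int tx_cnt;       /* posted but not yet completed */

/* Producer side: claim the next slot, like riocm_post_send under tx_lock. */
static int post_send(void)
{
	if (tx_cnt == TX_RING_SIZE)
		return -1;                              /* ring full */
	tx_cnt++;
	tx_slot = (tx_slot + 1) & (TX_RING_SIZE - 1);
	return 0;
}

/*
 * Completion side: retire finished slots up to 'slot'.  The extra
 * "tx_cnt == TX_RING_SIZE" test covers a completely full ring, where
 * tx_ack_slot has wrapped around and already equals 'slot'.
 */
static void txcq_handler(unsigned int slot)
{
	while (tx_cnt && (tx_ack_slot != slot || tx_cnt == TX_RING_SIZE)) {
		tx_ack_slot = (tx_ack_slot + 1) & (TX_RING_SIZE - 1);
		tx_cnt--;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < TX_RING_SIZE; i++)
		post_send();                            /* fill the ring completely */

	printf("extra post on full ring: %d\n", post_send());  /* prints -1 */

	txcq_handler(8);                                /* pretend slots 0..7 completed */
	printf("cnt=%u ack=%u slot=%u\n", tx_cnt, tx_ack_slot, tx_slot);
	return 0;
}
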
703 static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
717 spin_lock_irqsave(&cm->tx_lock, flags);
718 list_add_tail(&treq->node, &cm->tx_reqs);
719 spin_unlock_irqrestore(&cm->tx_lock, flags);
725 * @cm: cm_dev object
733 static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
739 spin_lock_irqsave(&cm->tx_lock, flags);
741 if (cm->mport == NULL) {
746 if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
752 cm->tx_buf[cm->tx_slot] = buffer;
753 rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);
756 buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);
758 ++cm->tx_cnt;
759 ++cm->tx_slot;
760 cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
763 spin_unlock_irqrestore(&cm->tx_lock, flags);
932 * @cm: CM device to send connect request
942 static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
959 ch->cmdev = cm;
962 ch->loc_destid = cm->mport->host_deviceid;
989 ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
994 ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
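The two riocm_ch_connect calls above show the fallback used when sending the connect request: try riocm_post_send() first and, if it fails because the outbound ring is full, hand the same buffer to riocm_queue_req() so the TX-completion handler can transmit it later. A toy sketch of that try-then-queue shape follows, with invented post_send()/queue_req() stand-ins; the -EBUSY trigger is an assumption based on the ring-full case shown in the riocm_post_send fragments.

#include <stdio.h>
#include <errno.h>

/* Invented stand-ins: post_send() reports -EBUSY when the outbound ring is
 * full; queue_req() parks the buffer until the completion handler can send it. */
static int ring_full = 1;

static int post_send(const char *buf)
{
	if (ring_full)
		return -EBUSY;
	printf("sent immediately: %s\n", buf);
	return 0;
}

static int queue_req(const char *buf)
{
	printf("queued for the TX-completion handler: %s\n", buf);
	return 0;
}

int main(void)
{
	const char *hdr = "connect request";
	int ret;

	/* Same shape as the riocm_ch_connect fragments: try to post right away,
	 * and only when the ring is full queue the request for later. */
	ret = post_send(hdr);
	if (ret == -EBUSY)
		ret = queue_req(hdr);

	return ret ? 1 : 0;
}
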
1231 struct cm_dev *cm;
1238 list_for_each_entry(cm, &cm_dev_list, list) {
1239 if ((cm->mport->id == mport_id) &&
1240 rio_mport_is_running(cm->mport)) {
1262 ch->cmdev = cm;
1263 ch->loc_destid = cm->mport->host_deviceid;
1534 struct cm_dev *cm;
1543 list_for_each_entry(cm, &cm_dev_list, list) {
1544 if (cm->mport->id == mport_id) {
1545 count = cm->npeers;
1562 struct cm_dev *cm;
1579 list_for_each_entry(cm, &cm_dev_list, list)
1580 if (cm->mport->id == (u8)info[1])
1587 nent = min(info[0], cm->npeers);
1596 list_for_each_entry(peer, &cm->peers, node) {
1621 struct cm_dev *cm;
1636 list_for_each_entry(cm, &cm_dev_list, list) {
1638 *entry_ptr = (cm->mport->id << 16) |
1639 cm->mport->host_deviceid;
1781 struct cm_dev *cm;
1793 list_for_each_entry(cm, &cm_dev_list, list) {
1794 if (cm->mport->id == chan.mport_id) {
1803 if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
1811 list_for_each_entry(peer, &cm->peers, node) {
1823 return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
1948 struct cm_dev *cm;
1962 list_for_each_entry(cm, &cm_dev_list, list) {
1963 if (cm->mport == rdev->net->hport)
1973 list_add_tail(&peer->node, &cm->peers);
1974 cm->npeers++;
1991 struct cm_dev *cm;
2006 list_for_each_entry(cm, &cm_dev_list, list) {
2007 if (cm->mport == rdev->net->hport) {
2020 list_for_each_entry(peer, &cm->peers, node) {
2025 cm->npeers--;
2098 struct cm_dev *cm;
2103 cm = kzalloc(sizeof(*cm), GFP_KERNEL);
2104 if (!cm)
2107 cm->mport = mport;
2109 rc = rio_request_outb_mbox(mport, cm, cmbox,
2114 kfree(cm);
2118 rc = rio_request_inb_mbox(mport, cm, cmbox,
2124 kfree(cm);
2128 cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
2129 if (!cm->rx_wq) {
2132 kfree(cm);
2141 cm->rx_buf[i] = NULL;
2143 cm->rx_slots = RIOCM_RX_RING_SIZE;
2144 mutex_init(&cm->rx_lock);
2145 riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
2146 INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
2148 cm->tx_slot = 0;
2149 cm->tx_cnt = 0;
2150 cm->tx_ack_slot = 0;
2151 spin_lock_init(&cm->tx_lock);
2153 INIT_LIST_HEAD(&cm->peers);
2154 cm->npeers = 0;
2155 INIT_LIST_HEAD(&cm->tx_reqs);
2158 list_add_tail(&cm->list, &cm_dev_list);
2175 struct cm_dev *cm;
2186 list_for_each_entry(cm, &cm_dev_list, list) {
2187 if (cm->mport == mport) {
2188 list_del(&cm->list);
2197 flush_workqueue(cm->rx_wq);
2198 destroy_workqueue(cm->rx_wq);
2203 if (ch->cmdev == cm) {
2223 if (!list_empty(&cm->peers))
2225 list_for_each_entry_safe(peer, temp, &cm->peers, node) {
2231 riocm_rx_free(cm);
2232 kfree(cm);