Lines matching references to cl in the MEI host client code; each entry gives the source line number followed by the matching line.

339 cb->cl->tx_cb_queued++;
351 if (!WARN_ON(cb->cl->tx_cb_queued == 0))
352 cb->cl->tx_cb_queued--;
362 * @cl: mei client
365 static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
370 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
381 * @cl: mei client
387 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
399 cb->cl = cl;
408 * mei_io_list_flush_cl - removes cbs belonging to the cl.
411 * @cl: host client
414 const struct mei_cl *cl)
419 if (cl == cb->cl) {
428 * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
431 * @cl: host client
435 const struct mei_cl *cl,
441 if (cl == cb->cl && (!fp || fp == cb->fp))
464 * @cl: host client
466 static void mei_cl_free_pending(struct mei_cl *cl)
470 cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
477 * @cl: host client
484 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
490 cb = mei_io_cb_init(cl, fop_type, fp);
511 * @cl: host client
519 struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
527 length = max_t(size_t, length, mei_cl_mtu(cl));
529 cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
533 list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
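The fragments at lines 484-533 trace the callback allocation path: mei_io_cb_init() ties a new cb to its client and file, mei_cl_alloc_cb() builds on it with a requested length, and mei_cl_enqueue_ctrl_wr_cb() clamps that length to at least one MTU (the max_t at line 527) before appending the cb to the device's ctrl_wr_list. Below is a minimal userspace sketch of that allocate-and-enqueue pattern; the MTU constant, cb_alloc() and ctrl_list_add_tail() are illustrative stand-ins, not driver API.

#include <stdio.h>
#include <stdlib.h>

#define MTU 512				/* stand-in for mei_cl_mtu(cl) */

struct cb {
	struct cb *next;
	size_t buf_len;
	unsigned char *buf;
};

struct ctrl_list {
	struct cb *head, **tail;
};

/* Allocate a cb with a buffer of at least one MTU, mirroring
 * the max_t(size_t, length, mei_cl_mtu(cl)) clamp in the listing. */
static struct cb *cb_alloc(size_t length)
{
	struct cb *cb = calloc(1, sizeof(*cb));

	if (!cb)
		return NULL;
	cb->buf_len = length > MTU ? length : MTU;
	cb->buf = calloc(1, cb->buf_len);
	if (!cb->buf) {
		free(cb);
		return NULL;
	}
	return cb;
}

/* Append to the tail of the control-write queue. */
static void ctrl_list_add_tail(struct ctrl_list *l, struct cb *cb)
{
	*l->tail = cb;
	l->tail = &cb->next;
}

int main(void)
{
	struct ctrl_list wr = { .head = NULL, .tail = &wr.head };
	struct cb *cb = cb_alloc(0);	/* length 0, as in the connect/disconnect paths */

	if (!cb)
		return 1;
	ctrl_list_add_tail(&wr, cb);
	printf("queued cb with %zu-byte buffer\n", cb->buf_len);
	free(cb->buf);
	free(cb);
	return 0;
}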
538 * mei_cl_read_cb - find this cl's callback in the read list
541 * @cl: host client
546 struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
551 spin_lock(&cl->rd_completed_lock);
552 list_for_each_entry(cb, &cl->rd_completed, list)
557 spin_unlock(&cl->rd_completed_lock);
562 * mei_cl_flush_queues - flushes queue lists belonging to cl.
564 * @cl: host client
567 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
569 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
573 if (WARN_ON(!cl || !cl->dev))
576 dev = cl->dev;
578 cl_dbg(dev, cl, "remove list entry belonging to cl\n");
579 mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
580 mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
583 mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
584 mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
585 mei_cl_free_pending(cl);
587 spin_lock(&cl->rd_completed_lock);
588 mei_io_list_free_fp(&cl->rd_completed, fp);
589 spin_unlock(&cl->rd_completed_lock);
595 * mei_cl_init - initializes cl.
597 * @cl: host client to be initialized
600 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
602 memset(cl, 0, sizeof(*cl));
603 init_waitqueue_head(&cl->wait);
604 init_waitqueue_head(&cl->rx_wait);
605 init_waitqueue_head(&cl->tx_wait);
606 init_waitqueue_head(&cl->ev_wait);
607 INIT_LIST_HEAD(&cl->vtag_map);
608 spin_lock_init(&cl->rd_completed_lock);
609 INIT_LIST_HEAD(&cl->rd_completed);
610 INIT_LIST_HEAD(&cl->rd_pending);
611 INIT_LIST_HEAD(&cl->link);
612 cl->writing_state = MEI_IDLE;
613 cl->state = MEI_FILE_UNINITIALIZED;
614 cl->dev = dev;
618 * mei_cl_allocate - allocates cl structure and sets it up.
625 struct mei_cl *cl;
627 cl = kmalloc(sizeof(*cl), GFP_KERNEL);
628 if (!cl)
631 mei_cl_init(cl, dev);
633 return cl;
639 * @cl: host client
645 int mei_cl_link(struct mei_cl *cl)
650 if (WARN_ON(!cl || !cl->dev))
653 dev = cl->dev;
669 cl->host_client_id = id;
670 list_add_tail(&cl->link, &dev->file_list);
674 cl->state = MEI_FILE_INITIALIZING;
676 cl_dbg(dev, cl, "link cl\n");
683 * @cl: host client
687 int mei_cl_unlink(struct mei_cl *cl)
692 if (!cl)
695 if (WARN_ON(!cl->dev))
698 dev = cl->dev;
700 cl_dbg(dev, cl, "unlink client");
706 if (cl->host_client_id)
707 clear_bit(cl->host_client_id, dev->host_clients_map);
709 list_del_init(&cl->link);
711 cl->state = MEI_FILE_UNINITIALIZED;
712 cl->writing_state = MEI_IDLE;
714 WARN_ON(!list_empty(&cl->rd_completed) ||
715 !list_empty(&cl->rd_pending) ||
716 !list_empty(&cl->link));
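Lines 645-716 cover linking and unlinking a host client: mei_cl_link() picks a free host id, stores it and adds the client to the device's file_list, while mei_cl_unlink() clears that bit in host_clients_map (id 0 is treated as unassigned, hence the check at line 706) and resets the client's state. A tiny stand-alone model of the id bitmap handling, assuming a fixed-size map; id_alloc() and id_free() are made-up names, not the driver's helpers.

#include <stdio.h>
#include <limits.h>

#define MAX_CLIENTS	256
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long client_map[MAX_CLIENTS / BITS_PER_WORD];

/* Find and claim the first free id, or return -1 if the map is full. */
static int id_alloc(void)
{
	for (int id = 0; id < MAX_CLIENTS; id++) {
		unsigned long *w = &client_map[id / BITS_PER_WORD];
		unsigned long bit = 1UL << (id % BITS_PER_WORD);

		if (!(*w & bit)) {
			*w |= bit;
			return id;
		}
	}
	return -1;
}

/* Release an id, as clear_bit() does on unlink. */
static void id_free(int id)
{
	client_map[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
}

int main(void)
{
	int id = id_alloc();

	printf("linked client id %d\n", id);
	id_free(id);
	return 0;
}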
761 * @cl: host client
763 static void mei_cl_wake_all(struct mei_cl *cl)
765 struct mei_device *dev = cl->dev;
768 if (waitqueue_active(&cl->rx_wait)) {
769 cl_dbg(dev, cl, "Waking up reading client!\n");
770 wake_up_interruptible(&cl->rx_wait);
773 if (waitqueue_active(&cl->tx_wait)) {
774 cl_dbg(dev, cl, "Waking up writing client!\n");
775 wake_up_interruptible(&cl->tx_wait);
778 if (waitqueue_active(&cl->ev_wait)) {
779 cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
780 wake_up_interruptible(&cl->ev_wait);
783 if (waitqueue_active(&cl->wait)) {
784 cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
785 wake_up(&cl->wait);
793 * @cl: host client
795 static void mei_cl_set_disconnected(struct mei_cl *cl)
797 struct mei_device *dev = cl->dev;
799 if (cl->state == MEI_FILE_DISCONNECTED ||
800 cl->state <= MEI_FILE_INITIALIZING)
803 cl->state = MEI_FILE_DISCONNECTED;
804 mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
805 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
806 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
807 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
808 mei_cl_wake_all(cl);
809 cl->rx_flow_ctrl_creds = 0;
810 cl->tx_flow_ctrl_creds = 0;
811 cl->timer_count = 0;
813 if (!cl->me_cl)
816 if (!WARN_ON(cl->me_cl->connect_count == 0))
817 cl->me_cl->connect_count--;
819 if (cl->me_cl->connect_count == 0)
820 cl->me_cl->tx_flow_ctrl_creds = 0;
822 mei_me_cl_put(cl->me_cl);
823 cl->me_cl = NULL;
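Lines 795-823 show the disconnect teardown: the state flips to MEI_FILE_DISCONNECTED, queued I/O for the client is flushed from the write and control lists, waiters are woken, the credit and timer counters are zeroed, and the reference on the ME client is dropped after decrementing connect_count, with the shared TX credits reset once the last connection is gone. A small model of that last bookkeeping step; me_cl_put() here only decrements a counter and is not the driver's kref-based helper.

#include <stdio.h>
#include <assert.h>

struct me_client {
	int refcount;
	int connect_count;
	int tx_flow_ctrl_creds;
};

static void me_cl_put(struct me_client *me)
{
	assert(me->refcount > 0);
	me->refcount--;		/* kref_put() in the driver; nothing is freed here */
}

/* Mirror the tail of mei_cl_set_disconnected(): drop this client's
 * connection and reset the shared credits when it was the last one. */
static void drop_connection(struct me_client *me)
{
	if (me->connect_count > 0)
		me->connect_count--;
	if (me->connect_count == 0)
		me->tx_flow_ctrl_creds = 0;
	me_cl_put(me);
}

int main(void)
{
	struct me_client me = { .refcount = 2, .connect_count = 1,
				.tx_flow_ctrl_creds = 1 };

	drop_connection(&me);
	printf("connect_count=%d creds=%d refs=%d\n",
	       me.connect_count, me.tx_flow_ctrl_creds, me.refcount);
	return 0;
}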
826 static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
839 cl->me_cl = me_cl;
840 cl->state = MEI_FILE_CONNECTING;
841 cl->me_cl->connect_count++;
849 * @cl: host client
854 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
859 dev = cl->dev;
861 ret = mei_hbm_cl_disconnect_req(dev, cl);
862 cl->status = ret;
864 cl->state = MEI_FILE_DISCONNECT_REPLY;
869 cl->timer_count = MEI_CONNECT_TIMEOUT;
879 * @cl: client
885 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
888 struct mei_device *dev = cl->dev;
901 ret = mei_cl_send_disconnect(cl, cb);
912 * @cl: host client
916 static int __mei_cl_disconnect(struct mei_cl *cl)
922 dev = cl->dev;
924 cl->state = MEI_FILE_DISCONNECTING;
926 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
933 rets = mei_cl_send_disconnect(cl, cb);
935 cl_err(dev, cl, "failed to disconnect.\n");
941 wait_event_timeout(cl->wait,
942 cl->state == MEI_FILE_DISCONNECT_REPLY ||
943 cl->state == MEI_FILE_DISCONNECTED,
947 rets = cl->status;
948 if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
949 cl->state != MEI_FILE_DISCONNECTED) {
950 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
956 mei_cl_set_disconnected(cl);
958 cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
967 * @cl: host client
973 int mei_cl_disconnect(struct mei_cl *cl)
978 if (WARN_ON(!cl || !cl->dev))
981 dev = cl->dev;
983 cl_dbg(dev, cl, "disconnecting");
985 if (!mei_cl_is_connected(cl))
988 if (mei_cl_is_fixed_address(cl)) {
989 mei_cl_set_disconnected(cl);
994 cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
995 mei_cl_set_disconnected(cl);
1002 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1006 rets = __mei_cl_disconnect(cl);
1008 cl_dbg(dev, cl, "rpm: autosuspend\n");
1020 * @cl: private data of the file object
1024 static bool mei_cl_is_other_connecting(struct mei_cl *cl)
1029 dev = cl->dev;
1033 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
1043 * @cl: host client
1048 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1053 dev = cl->dev;
1055 ret = mei_hbm_cl_connect_req(dev, cl);
1056 cl->status = ret;
1058 cl->state = MEI_FILE_DISCONNECT_REPLY;
1063 cl->timer_count = MEI_CONNECT_TIMEOUT;
1071 * @cl: host client
1077 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1080 struct mei_device *dev = cl->dev;
1085 if (mei_cl_is_other_connecting(cl))
1096 rets = mei_cl_send_connect(cl, cb);
1106 * @cl: host client
1114 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1121 if (WARN_ON(!cl || !cl->dev || !me_cl))
1124 dev = cl->dev;
1126 rets = mei_cl_set_connecting(cl, me_cl);
1130 if (mei_cl_is_fixed_address(cl)) {
1131 cl->state = MEI_FILE_CONNECTED;
1139 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1143 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1150 if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
1151 rets = mei_cl_send_connect(cl, cb);
1157 wait_event_timeout(cl->wait,
1158 (cl->state == MEI_FILE_CONNECTED ||
1159 cl->state == MEI_FILE_DISCONNECTED ||
1160 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
1161 cl->state == MEI_FILE_DISCONNECT_REPLY),
1165 if (!mei_cl_is_connected(cl)) {
1166 if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1167 mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1168 mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1172 __mei_cl_disconnect(cl);
1178 if (!cl->status)
1179 cl->status = -EFAULT;
1182 rets = cl->status;
1184 cl_dbg(dev, cl, "rpm: autosuspend\n");
1191 if (!mei_cl_is_connected(cl))
1192 mei_cl_set_disconnected(cl);
1202 * Return: cl on success, ERR_PTR on failure
1206 struct mei_cl *cl;
1209 cl = mei_cl_allocate(dev);
1210 if (!cl) {
1215 ret = mei_cl_link(cl);
1219 return cl;
1221 kfree(cl);
1226 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1228 * @cl: host client
1232 static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1234 if (WARN_ON(!cl || !cl->me_cl))
1237 if (cl->tx_flow_ctrl_creds > 0)
1240 if (mei_cl_is_fixed_address(cl))
1243 if (mei_cl_is_single_recv_buf(cl)) {
1244 if (cl->me_cl->tx_flow_ctrl_creds > 0)
1254 * @cl: host client
1260 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1262 if (WARN_ON(!cl || !cl->me_cl))
1265 if (mei_cl_is_fixed_address(cl))
1268 if (mei_cl_is_single_recv_buf(cl)) {
1269 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1271 cl->me_cl->tx_flow_ctrl_creds--;
1273 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1275 cl->tx_flow_ctrl_creds--;
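The fragments at lines 1232-1275 encode the TX flow-control policy: fixed-address clients are exempt, clients of a single-receive-buffer ME client draw from the shared me_cl credit pool, and everyone else uses the per-host-client counter; the _reduce variant consumes one credit from whichever pool applies. A self-contained sketch of that check/consume pair under those assumptions, using plain structs rather than the kernel types.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for struct mei_me_client / struct mei_cl. */
struct me_client {
	bool single_recv_buf;
	int tx_flow_ctrl_creds;		/* shared pool */
};

struct host_client {
	bool fixed_address;
	int tx_flow_ctrl_creds;		/* per-client pool */
	struct me_client *me_cl;
};

/* Returns 1 if the client may transmit, 0 otherwise. */
static int tx_creds_check(const struct host_client *cl)
{
	if (cl->fixed_address)
		return 1;			/* no flow control at all */
	if (cl->tx_flow_ctrl_creds > 0)
		return 1;			/* per-client credit available */
	if (cl->me_cl->single_recv_buf)
		return cl->me_cl->tx_flow_ctrl_creds > 0;	/* shared credit */
	return 0;
}

/* Consume one credit from whichever pool applies. */
static void tx_creds_reduce(struct host_client *cl)
{
	if (cl->fixed_address)
		return;
	if (cl->me_cl->single_recv_buf)
		cl->me_cl->tx_flow_ctrl_creds--;
	else
		cl->tx_flow_ctrl_creds--;
}

int main(void)
{
	struct me_client me = { .single_recv_buf = true, .tx_flow_ctrl_creds = 1 };
	struct host_client cl = { .fixed_address = false, .me_cl = &me };

	if (tx_creds_check(&cl)) {
		tx_creds_reduce(&cl);
		printf("sent one message, shared credits left: %d\n",
		       me.tx_flow_ctrl_creds);
	}
	return 0;
}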
1308 * @cl: host client
1315 const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
1319 list_for_each_entry(vtag_l, &cl->vtag_map, list)
1329 * @cl: host client
1332 static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
1336 list_for_each_entry(vtag_l, &cl->vtag_map, list) {
1348 * @cl: host client
1350 static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
1354 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
1356 if (mei_cl_enqueue_ctrl_wr_cb(cl,
1357 mei_cl_mtu(cl),
1360 cl->rx_flow_ctrl_creds++;
1369 * @cl: host client
1375 int mei_cl_vt_support_check(const struct mei_cl *cl)
1377 struct mei_device *dev = cl->dev;
1382 if (!cl->me_cl)
1385 return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
1392 * @cl: host client
1396 void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1400 if (!mei_cl_vt_support_check(cl)) {
1401 fp = mei_cl_fp_by_vtag(cl, cb->vtag);
1408 mei_cl_reset_read_by_vtag(cl, cb->vtag);
1409 mei_cl_read_vtag_add_fc(cl);
1412 spin_lock(&cl->rd_completed_lock);
1413 list_add_tail(&cb->list, &cl->rd_completed);
1414 spin_unlock(&cl->rd_completed_lock);
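Lines 1396-1414, together with the vtag helpers just above them, show what happens when a read completes on a vtag-aware client: the issuing file is looked up by the cb's vtag, the matching vtag entry's pending-read state is reset, a fresh RX credit is requested, and the cb is appended to rd_completed under rd_completed_lock. The sketch below models only the vtag lookup and the locked completion step, with a pthread mutex standing in for the spinlock; all names are illustrative, not the driver's API.

/* build: cc -pthread vtag_complete.c */
#include <stdio.h>
#include <pthread.h>

/* One entry in the client's vtag map: vtag -> file handle. */
struct vtag_entry {
	struct vtag_entry *next;
	unsigned char vtag;
	void *fp;			/* stand-in for struct file * */
	int pending_read;
};

struct client {
	struct vtag_entry *vtag_map;
	pthread_mutex_t rd_completed_lock;	/* a spinlock in the driver */
};

/* Resolve the file handle that issued the read for this vtag,
 * as mei_cl_fp_by_vtag() does in the listing. */
static void *fp_by_vtag(struct client *cl, unsigned char vtag)
{
	for (struct vtag_entry *e = cl->vtag_map; e; e = e->next)
		if (e->vtag == vtag)
			return e->fp;
	return NULL;
}

static void add_rd_completed(struct client *cl, unsigned char vtag)
{
	void *fp = fp_by_vtag(cl, vtag);

	/* clear the per-vtag pending flag, as mei_cl_reset_read_by_vtag() does */
	for (struct vtag_entry *e = cl->vtag_map; e; e = e->next)
		if (e->vtag == vtag)
			e->pending_read = 0;

	pthread_mutex_lock(&cl->rd_completed_lock);
	printf("completed read for fp %p (vtag %u)\n", fp, vtag);
	pthread_mutex_unlock(&cl->rd_completed_lock);
}

int main(void)
{
	int file;			/* dummy stand-in for a struct file */
	struct vtag_entry e = { .vtag = 3, .fp = &file, .pending_read = 1 };
	struct client cl = { .vtag_map = &e,
			     .rd_completed_lock = PTHREAD_MUTEX_INITIALIZER };

	add_rd_completed(&cl, 3);
	return 0;
}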
1420 * @cl: host client
1424 void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1426 spin_lock(&cl->rd_completed_lock);
1428 spin_unlock(&cl->rd_completed_lock);
1464 * @cl: client
1470 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1473 struct mei_device *dev = cl->dev;
1488 ret = mei_hbm_cl_notify_req(dev, cl, request);
1490 cl->status = ret;
1502 * @cl: host client
1510 int mei_cl_notify_request(struct mei_cl *cl,
1518 if (WARN_ON(!cl || !cl->dev))
1521 dev = cl->dev;
1524 cl_dbg(dev, cl, "notifications not supported\n");
1528 if (!mei_cl_is_connected(cl))
1534 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1539 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1546 if (mei_hbm_cl_notify_req(dev, cl, request)) {
1554 wait_event_timeout(cl->wait,
1555 cl->notify_en == request ||
1556 cl->status ||
1557 !mei_cl_is_connected(cl),
1561 if (cl->notify_en != request && !cl->status)
1562 cl->status = -EFAULT;
1564 rets = cl->status;
1567 cl_dbg(dev, cl, "rpm: autosuspend\n");
1578 * @cl: host client
1582 void mei_cl_notify(struct mei_cl *cl)
1586 if (!cl || !cl->dev)
1589 dev = cl->dev;
1591 if (!cl->notify_en)
1594 cl_dbg(dev, cl, "notify event");
1595 cl->notify_ev = true;
1596 if (!mei_cl_bus_notify_event(cl))
1597 wake_up_interruptible(&cl->ev_wait);
1599 if (cl->ev_async)
1600 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1607 * @cl: host client
1615 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1622 if (WARN_ON(!cl || !cl->dev))
1625 dev = cl->dev;
1628 cl_dbg(dev, cl, "notifications not supported\n");
1632 if (!mei_cl_is_connected(cl))
1635 if (cl->notify_ev)
1642 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1649 *notify_ev = cl->notify_ev;
1650 cl->notify_ev = false;
1657 * @cl: host client
1663 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
1669 if (WARN_ON(!cl || !cl->dev))
1672 dev = cl->dev;
1674 if (!mei_cl_is_connected(cl))
1677 if (!mei_me_cl_is_active(cl->me_cl)) {
1678 cl_err(dev, cl, "no such me client\n");
1682 if (mei_cl_is_fixed_address(cl))
1686 if (cl->rx_flow_ctrl_creds) {
1687 mei_cl_set_read_by_fp(cl, fp);
1691 cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1695 mei_cl_set_read_by_fp(cl, fp);
1700 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1706 rets = mei_hbm_cl_flow_control_req(dev, cl);
1710 list_move_tail(&cb->list, &cl->rd_pending);
1712 cl->rx_flow_ctrl_creds++;
1715 cl_dbg(dev, cl, "rpm: autosuspend\n");
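Lines 1663-1715 outline how a read is started: the client must be connected and its ME peer active; fixed-address clients skip credit handling; if an RX credit is already outstanding the read is merely attributed to the file; otherwise a READ cb is queued, a flow-control request goes to the firmware, the cb moves to rd_pending and the credit count is bumped. A compact model of that decision flow; flow_control_req() is a placeholder that always succeeds, not mei_hbm_cl_flow_control_req().

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct reader {
	bool connected;
	bool fixed_address;
	int rx_flow_ctrl_creds;
	int pending_reads;
};

/* Placeholder for the firmware flow-control request; assume success here. */
static int flow_control_req(struct reader *r)
{
	(void)r;
	return 0;
}

static int read_start(struct reader *r)
{
	if (!r->connected)
		return -ENODEV;

	if (r->fixed_address)
		return 0;			/* no credit bookkeeping needed */

	if (r->rx_flow_ctrl_creds)
		return 0;			/* a credit is already outstanding */

	if (flow_control_req(r))
		return -EIO;

	r->pending_reads++;			/* cb moved to rd_pending */
	r->rx_flow_ctrl_creds++;
	return 0;
}

int main(void)
{
	struct reader r = { .connected = true };
	int ret = read_start(&r);

	printf("read_start: %d, creds now %d\n", ret, r.rx_flow_ctrl_creds);
	return 0;
}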
1770 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1771 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1792 * @cl: client
1798 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1815 if (WARN_ON(!cl || !cl->dev))
1818 dev = cl->dev;
1824 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1829 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1852 cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
1890 cl->status = 0;
1891 cl->writing_state = MEI_WRITING;
1895 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1909 cl->status = rets;
1918 * @cl: host client
1923 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1939 if (WARN_ON(!cl || !cl->dev))
1945 dev = cl->dev;
1950 cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
1958 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
1963 cl->writing_state = MEI_IDLE;
1966 rets = mei_cl_tx_flow_ctrl_creds(cl);
1977 cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
1983 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1989 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
2031 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
2035 cl->writing_state = MEI_WRITING;
2047 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
2050 rets = wait_event_interruptible(cl->tx_wait,
2051 cl->writing_state == MEI_WRITE_COMPLETE ||
2052 (!mei_cl_is_connected(cl)));
2060 if (cl->writing_state != MEI_WRITE_COMPLETE) {
2068 cl_dbg(dev, cl, "rpm: autosuspend\n");
2082 * @cl: private data of the file object.
2085 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
2087 struct mei_device *dev = cl->dev;
2092 cl->writing_state = MEI_WRITE_COMPLETE;
2093 if (waitqueue_active(&cl->tx_wait)) {
2094 wake_up_interruptible(&cl->tx_wait);
2102 mei_cl_add_rd_completed(cl, cb);
2103 if (!mei_cl_is_fixed_address(cl) &&
2104 !WARN_ON(!cl->rx_flow_ctrl_creds))
2105 cl->rx_flow_ctrl_creds--;
2106 if (!mei_cl_bus_rx_event(cl))
2107 wake_up_interruptible(&cl->rx_wait);
2114 if (waitqueue_active(&cl->wait))
2115 wake_up(&cl->wait);
2120 mei_cl_set_disconnected(cl);
2135 struct mei_cl *cl;
2137 list_for_each_entry(cl, &dev->file_list, link)
2138 mei_cl_set_disconnected(cl);