Lines matching defs:ctl in drivers/thunderbolt/ctl.c (Thunderbolt control channel)

16 #include "ctl.h"
41 #define tb_ctl_WARN(ctl, format, arg...) \
42 dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
44 #define tb_ctl_err(ctl, format, arg...) \
45 dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
47 #define tb_ctl_warn(ctl, format, arg...) \
48 dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
50 #define tb_ctl_info(ctl, format, arg...) \
51 dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
53 #define tb_ctl_dbg(ctl, format, arg...) \
54 dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
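
Each of these macros forwards to the matching dev_* logger on the NHI's PCI device, so every control-channel message carries that device's prefix. A minimal usage sketch (the message text is illustrative, not from the source):

        /* expands to dev_dbg(&(ctl)->nhi->pdev->dev, "starting rings\n") */
        tb_ctl_dbg(ctl, "starting rings\n");
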
111 static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
115 WARN_ON(req->ctl);
117 mutex_lock(&ctl->request_queue_lock);
118 if (!ctl->running) {
119 mutex_unlock(&ctl->request_queue_lock);
122 req->ctl = ctl;
123 list_add_tail(&req->list, &ctl->request_queue);
125 mutex_unlock(&ctl->request_queue_lock);
131 struct tb_ctl *ctl = req->ctl;
133 mutex_lock(&ctl->request_queue_lock);
138 mutex_unlock(&ctl->request_queue_lock);
147 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
152 mutex_lock(&pkg->ctl->request_queue_lock);
153 list_for_each_entry(req, &pkg->ctl->request_queue, list) {
161 mutex_unlock(&pkg->ctl->request_queue_lock);
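
Only the lines that mention ctl survive the match, so the request-queue helpers read disjointedly above. As a sketch of how the enqueue path (lines 111-125) fits together, assuming the usual list_head/mutex idiom; the -ENOTCONN return for a stopped channel is an assumption:

        static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
                                          struct tb_cfg_request *req)
        {
                WARN_ON(req->ctl);

                mutex_lock(&ctl->request_queue_lock);
                if (!ctl->running) {
                        /* channel stopped, refuse new requests */
                        mutex_unlock(&ctl->request_queue_lock);
                        return -ENOTCONN;
                }
                req->ctl = ctl;
                list_add_tail(&req->list, &ctl->request_queue);
                mutex_unlock(&ctl->request_queue_lock);
                return 0;
        }

The dequeue (131-138) and find (147-161) helpers take the same lock, which is what makes tb_ctl_stop()'s running check race-free.
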
222 struct tb_ctl *ctl = response->ctl;
232 tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
234 tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
236 tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
260 static void tb_cfg_print_error(struct tb_ctl *ctl,
274 tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
283 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
287 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
291 tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
296 tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
310 dma_pool_free(pkg->ctl->frame_pool,
316 static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
321 pkg->ctl = ctl;
322 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
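
The alloc path pairs a kzalloc'd ctl_pkg with one frame buffer from the shared DMA pool. A reconstruction of the function around line 322 (the error handling is not in the listing, so treat it as an assumption):

        static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
        {
                struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

                if (!pkg)
                        return NULL;
                pkg->ctl = ctl;
                pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
                                             &pkg->frame.buffer_phy);
                if (!pkg->buffer) {
                        kfree(pkg);
                        return NULL;
                }
                return pkg;
        }
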
348 static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
354 tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
358 tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
362 pkg = tb_ctl_pkg_alloc(ctl);
372 res = tb_ring_tx(ctl->tx, &pkg->frame);
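
Between the size checks and the ring submission the function fills in the frame descriptor; a hedged sketch of that stretch (tb_ctl_tx_callback and tb_crc are assumed helper names, and the byte-swap plus 4-byte trailing CRC are assumptions about the wire format):

        pkg->frame.callback = tb_ctl_tx_callback;       /* assumed name */
        pkg->frame.size = len + 4;                      /* payload + CRC */
        pkg->frame.sof = type;
        pkg->frame.eof = type;
        cpu_to_be32_array(pkg->buffer, data, len / 4);  /* le->be conversion */
        *(__be32 *)(pkg->buffer + len) = tb_crc(pkg->buffer, len);

        res = tb_ring_tx(ctl->tx, &pkg->frame);
        if (res)
                tb_ctl_pkg_free(pkg);                   /* ring is stopped */
        return res;
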
379 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
381 static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
384 return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
389 tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
392 * from ctl->rx_packets, so we do
425 * ctl->rx_packets.
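
Lines 389-392 are one call whose trailing block comment was split by the match; reconstructed, the resubmit helper is simply:

        static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
        {
                tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
                                                        * Failures during stop
                                                        * are ignored: all rx
                                                        * packets stay
                                                        * referenced from
                                                        * ctl->rx_packets, so
                                                        * none are lost.
                                                        */
        }

Line 425 belongs to the rx callback's canceled branch, which returns early for the same reason: a stopped ring still owns its packets via ctl->rx_packets.
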
429 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
445 tb_ctl_err(pkg->ctl,
450 tb_ctl_handle_event(pkg->ctl, frame->eof,
460 tb_ctl_err(pkg->ctl,
466 if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
480 req = tb_cfg_request_find(pkg->ctl, pkg);
504 * @ctl: Control channel to use
512 int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
524 ret = tb_cfg_request_enqueue(ctl, req);
528 ret = tb_ctl_tx(ctl, req->request, req->request_size,
569 * @ctl: Control channel to use
578 struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
587 ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
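
tb_cfg_request_sync() is the blocking wrapper over the asynchronous tb_cfg_request(); the on-stack completion visible at line 587 implies the usual pattern, sketched here (tb_cfg_request_cancel() and the timeout handling are assumptions):

        struct tb_cfg_result res = { 0 };
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
        if (ret) {
                res.err = ret;
                return res;
        }
        if (!wait_for_completion_timeout(&done,
                                         msecs_to_jiffies(timeout_msec)))
                tb_cfg_request_cancel(req, -ETIMEDOUT);  /* assumed helper */
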
613 struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
614 if (!ctl)
616 ctl->nhi = nhi;
617 ctl->callback = cb;
618 ctl->callback_data = cb_data;
620 mutex_init(&ctl->request_queue_lock);
621 INIT_LIST_HEAD(&ctl->request_queue);
622 ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
624 if (!ctl->frame_pool)
627 ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
628 if (!ctl->tx)
631 ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
633 if (!ctl->rx)
637 ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
638 if (!ctl->rx_packets[i])
640 ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
643 tb_ctl_dbg(ctl, "control channel created\n");
644 return ctl;
646 tb_ctl_free(ctl);
655 * Must NOT be called from ctl->callback.
657 void tb_ctl_free(struct tb_ctl *ctl)
661 if (!ctl)
664 if (ctl->rx)
665 tb_ring_free(ctl->rx);
666 if (ctl->tx)
667 tb_ring_free(ctl->tx);
671 tb_ctl_pkg_free(ctl->rx_packets[i]);
674 dma_pool_destroy(ctl->frame_pool);
675 kfree(ctl);
681 void tb_ctl_start(struct tb_ctl *ctl)
684 tb_ctl_dbg(ctl, "control channel starting...\n");
685 tb_ring_start(ctl->tx); /* TX ring is used to ack hotplug packets, so start it first */
686 tb_ring_start(ctl->rx);
688 tb_ctl_rx_submit(ctl->rx_packets[i]);
690 ctl->running = true;
696 * All invocations of ctl->callback will have finished after this method
699 * Must NOT be called from ctl->callback.
701 void tb_ctl_stop(struct tb_ctl *ctl)
703 mutex_lock(&ctl->request_queue_lock);
704 ctl->running = false;
705 mutex_unlock(&ctl->request_queue_lock);
707 tb_ring_stop(ctl->rx);
708 tb_ring_stop(ctl->tx);
710 if (!list_empty(&ctl->request_queue))
711 tb_ctl_WARN(ctl, "dangling request in request_queue\n");
712 INIT_LIST_HEAD(&ctl->request_queue);
713 tb_ctl_dbg(ctl, "control channel stopped\n");
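
Taken together, lines 613-713 give the channel lifecycle: allocate, start the rings, issue requests, stop, free. A usage sketch from a caller's point of view (my_event_cb is illustrative; its signature follows the bool-returning callback visible at lines 381-384):

        static bool my_event_cb(void *data, enum tb_cfg_pkg_type type,
                                const void *buf, size_t size)
        {
                /* consume or ignore the incoming event packet */
                return true;
        }

        struct tb_ctl *ctl = tb_ctl_alloc(nhi, my_event_cb, cb_data);
        if (!ctl)
                return -ENOMEM;
        tb_ctl_start(ctl);
        /* ... tb_cfg_read()/tb_cfg_write() requests ... */
        tb_ctl_stop(ctl);       /* never from inside my_event_cb */
        tb_ctl_free(ctl);
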
720 * @ctl: Control channel to use
728 int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
737 tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
739 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
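
The elided body between lines 728 and 737 builds the error packet that doubles as the ack; a plausible reconstruction (struct cfg_error_pkg, tb_cfg_make_header() and the TB_CFG_ERROR_ACK_PLUG_EVENT code are not in this listing, so treat the names as assumptions):

        struct cfg_error_pkg pkg = {
                .header = tb_cfg_make_header(route),
                .port = port,
                .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
        };

The final tb_ctl_tx() at line 739 then ships it as a TB_CFG_PKG_ERROR frame.
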
792 struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
815 res = tb_cfg_request_sync(ctl, req, timeout_msec);
827 struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
864 res = tb_cfg_request_sync(ctl, req, timeout_msec);
890 struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
929 res = tb_cfg_request_sync(ctl, req, timeout_msec);
948 static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
961 tb_cfg_print_error(ctl, res);
968 int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
971 struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
980 return tb_cfg_get_error(ctl, space, &res);
983 tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
994 int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
997 struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
1006 return tb_cfg_get_error(ctl, space, &res);
1009 tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
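
Callers normally go through these two wrappers rather than the raw variants; a minimal sketch reading one dword of port config space (the (space, offset, length) tail of the parameter list is inferred from the raw call at line 971 and is an assumption):

        u32 val;
        int ret;

        ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
        if (ret)
                return ret;
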
1029 int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
1032 struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
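
The trick here is that the reply header to any read names the responding port, so reading a single dword of switch config space reveals the upstream port. A hedged completion of the fragment (the error mapping is an assumption):

        if (res.err == 1)
                return -EIO;    /* config space error from the switch */
        if (res.err)
                return res.err;
        return res.response_port;
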