Lines Matching defs:vk

109 bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
111 return (!!atomic_read(&vk->msgq_inited));
114 void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
116 struct bcm_vk_alert *alert = &vk->host_alert;
120 spin_lock_irqsave(&vk->host_alert_lock, flags);
122 spin_unlock_irqrestore(&vk->host_alert_lock, flags);
124 if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
125 queue_work(vk->wq_thread, &vk->wq_work);
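
Lines 114-125 above show the host-alert helper: the alert bits are accumulated under host_alert_lock, and the notification work is queued only when the BCM_VK_WQ_NOTF_PEND bit was not already set, so a burst of alerts results in a single work item. A minimal sketch of that pattern follows; the notfs field name is an assumption, everything else mirrors the fragments.

    /* Sketch: record alert bits under a spinlock, then queue the offload
     * work at most once ('notfs' is an assumed field name). */
    static void host_alert_sketch(struct bcm_vk *vk, u32 bit_mask)
    {
        struct bcm_vk_alert *alert = &vk->host_alert;
        unsigned long flags;

        spin_lock_irqsave(&vk->host_alert_lock, flags);
        alert->notfs |= bit_mask;        /* accumulate the new alert bits */
        spin_unlock_irqrestore(&vk->host_alert_lock, flags);

        /* queue work only if a notification is not already pending */
        if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
            queue_work(vk->wq_thread, &vk->wq_work);
    }
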
145 struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
147 if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
149 uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);
156 dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
172 dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
176 bcm_vk_blk_drv_access(vk);
177 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
183 void bcm_vk_hb_init(struct bcm_vk *vk)
185 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
191 void bcm_vk_hb_deinit(struct bcm_vk *vk)
193 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
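
Lines 145-193 belong to the heartbeat monitor. The container_of at line 145 suggests the poll callback is driven by a timer_list embedded in hb_ctrl; the sketch below follows that reading. The timer field, last_uptime, lost_cnt, HB_POLL_INTERVAL and HB_LOST_MAX are assumed names/values, not taken from the fragments.

    /* Sketch of a timer-driven heartbeat check, under the assumptions above.
     * If the card's uptime register stops advancing for too many polls,
     * driver access is blocked and a host alert is raised. */
    #define HB_POLL_INTERVAL    (HZ)    /* assumed poll period */
    #define HB_LOST_MAX         5       /* assumed miss threshold */

    static void hb_poll_sketch(struct timer_list *t)
    {
        struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl, timer);
        struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
        u32 uptime_s;

        if (bcm_vk_drv_access_ok(vk)) {
            uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);
            if (uptime_s == hb->last_uptime)
                hb->lost_cnt++;          /* uptime stuck: count a miss */
            else
                hb->lost_cnt = 0;
            hb->last_uptime = uptime_s;

            if (hb->lost_cnt > HB_LOST_MAX) {
                dev_err(&vk->pdev->dev, "Heartbeat lost %u polls\n", hb->lost_cnt);
                bcm_vk_blk_drv_access(vk);
                bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
            }
        }

        mod_timer(&hb->timer, jiffies + HB_POLL_INTERVAL);    /* re-arm */
    }

    static void hb_init_sketch(struct bcm_vk *vk)
    {
        timer_setup(&vk->hb_ctrl.timer, hb_poll_sketch, 0);
        mod_timer(&vk->hb_ctrl.timer, jiffies + HB_POLL_INTERVAL);
    }

    static void hb_deinit_sketch(struct bcm_vk *vk)
    {
        del_timer_sync(&vk->hb_ctrl.timer);
    }
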
198 static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
202 spin_lock(&vk->msg_id_lock);
203 bitmap_clear(vk->bmap, start, nbits);
204 spin_unlock(&vk->msg_id_lock);
210 static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
216 spin_lock(&vk->ctx_lock);
219 if (vk->reset_pid) {
220 dev_err(&vk->pdev->dev,
222 vk->reset_pid);
227 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
228 if (!vk->ctx[i].in_use) {
229 vk->ctx[i].in_use = true;
230 ctx = &vk->ctx[i];
236 dev_err(&vk->pdev->dev, "All context in use\n");
244 list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);
247 kref_get(&vk->kref);
256 spin_unlock(&vk->ctx_lock);
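
Lines 210-256 allocate a per-open context: under ctx_lock the call is refused while a reset issued by another process is pending, the first free slot in the fixed-size ctx[] table is claimed, the context is filed in the pid hash table, and a reference on the device data is taken. A condensed sketch follows; hash_32(), BCM_VK_PID_HT_SHIFT and the ctx->pid field are illustrative assumptions standing in for whatever the driver really uses.

    /* Sketch of context allocation under vk->ctx_lock (assumed hash shown). */
    static struct bcm_vk_ctx *get_ctx_sketch(struct bcm_vk *vk, pid_t pid)
    {
        struct bcm_vk_ctx *ctx = NULL;
        u32 hash_idx;
        u32 i;

        spin_lock(&vk->ctx_lock);

        /* no new sessions while a reset by another pid is in flight */
        if (vk->reset_pid) {
            dev_err(&vk->pdev->dev, "reset by pid %d in progress\n",
                    vk->reset_pid);
            goto out;
        }

        /* claim the first free slot in the fixed-size context table */
        for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
            if (!vk->ctx[i].in_use) {
                vk->ctx[i].in_use = true;
                ctx = &vk->ctx[i];
                break;
            }
        }
        if (!ctx) {
            dev_err(&vk->pdev->dev, "All context in use\n");
            goto out;
        }

        /* file it under the pid so completions can be routed back */
        hash_idx = hash_32(pid, BCM_VK_PID_HT_SHIFT);
        ctx->pid = pid;
        list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);

        kref_get(&vk->kref);    /* each context pins the device data */
    out:
        spin_unlock(&vk->ctx_lock);
        return ctx;
    }
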
261 static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
266 spin_lock(&vk->msg_id_lock);
274 vk->msg_id++;
275 if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
276 vk->msg_id = 1;
278 if (test_bit(vk->msg_id, vk->bmap)) {
282 rc = vk->msg_id;
283 bitmap_set(vk->bmap, vk->msg_id, 1);
286 spin_unlock(&vk->msg_id_lock);
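
Lines 261-286 implement the rolling message-id allocator: msg_id advances with wrap-around (id 0 is skipped), already-outstanding ids are detected with test_bit, and the chosen id is marked in the bitmap before the lock is dropped. The sketch below mirrors that loop; the VK_MSG_ID_OVERFLOW sentinel and the loop bound are assumptions.

    /* Sketch of the bitmap-backed message-id allocator. */
    static u16 get_msg_id_sketch(struct bcm_vk *vk)
    {
        u16 rc = VK_MSG_ID_OVERFLOW;    /* assumed "no id free" sentinel */
        u16 tried = 0;

        spin_lock(&vk->msg_id_lock);
        while (tried < (VK_MSG_ID_BITMAP_SIZE - 1)) {
            /* advance and wrap; id 0 is reserved and never handed out */
            vk->msg_id++;
            if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
                vk->msg_id = 1;

            if (test_bit(vk->msg_id, vk->bmap)) {
                tried++;
                continue;    /* still outstanding, try the next id */
            }
            rc = vk->msg_id;
            bitmap_set(vk->bmap, vk->msg_id, 1);
            break;
        }
        spin_unlock(&vk->msg_id_lock);

        return rc;
    }

The id is released later through bcm_vk_msgid_bitmap_clear() (lines 198-204) once the matching response has been dequeued (line 767).
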
291 static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
300 dev_err(&vk->pdev->dev, "NULL context detected\n");
306 spin_lock(&vk->ctx_lock);
308 if (!vk->ctx[idx].in_use) {
309 dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
311 vk->ctx[idx].in_use = false;
312 vk->ctx[idx].miscdev = NULL;
317 list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
323 spin_unlock(&vk->ctx_lock);
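
Lines 291-323 release a context: the slot is marked free under ctx_lock and the pid hash bucket is walked to see whether the owning process still has other open sessions; the caller uses that count to decide whether to send a per-pid shutdown message (lines 727-750). Below is a sketch of the counting step, which must run under ctx_lock; entry->pid and ctx->hash_idx are assumed field names.

    /* Sketch: unlink a context and count the pid's remaining sessions.
     * Caller holds vk->ctx_lock. */
    static int count_remaining_sketch(struct bcm_vk *vk, struct bcm_vk_ctx *ctx,
                                      pid_t pid)
    {
        struct bcm_vk_ctx *entry;
        int count = 0;

        vk->ctx[ctx->idx].in_use = false;
        vk->ctx[ctx->idx].miscdev = NULL;

        list_del(&ctx->node);
        list_for_each_entry(entry, &vk->pid_ht[ctx->hash_idx].head, node)
            if (entry->pid == pid)
                count++;

        return count;    /* 0 => this was the pid's last session */
    }
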
346 struct bcm_vk *vk;
350 vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
377 bit_set = test_bit(msg_id, vk->bmap);
390 bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
399 void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
401 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
402 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
408 int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
411 struct device *dev = &vk->pdev->dev;
414 struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
415 &vk->to_h_msg_chan};
421 * If the driver is loaded at startup, before the VK OS is up,
426 if (!bcm_vk_msgq_marker_valid(vk)) {
431 msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);
434 num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
442 vk->to_v_msg_chan.q_nr = num_q;
443 vk->to_h_msg_chan.q_nr = num_q;
446 msgq = vk->bar[BAR_1] + msgq_off;
452 if (bcm_vk_drv_access_ok(vk) && !force_sync) {
494 qinfo->q_start = vk->bar[BAR_1] + msgq_start;
504 atomic_set(&vk->msgq_inited, 1);
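
Lines 408-504 synchronize the host's view of the message queues with the card: once the firmware has published a valid marker in BAR_1, the queue control-block offset and queue count are read, the count is split evenly between the to-card and to-host channels, each queue's q_start is pointed at BAR_1 memory (line 494), and msgq_inited is set so bcm_vk_drv_access_ok() starts returning true. A condensed sketch follows; the -EAGAIN return and the elided per-queue header parsing are assumptions about detail the fragments do not show.

    /* Sketch of message-queue discovery from BAR_1 (per-queue parsing elided). */
    static int sync_msgq_sketch(struct bcm_vk *vk)
    {
        struct device *dev = &vk->pdev->dev;
        u32 msgq_off, num_q;

        /* the card firmware must have published the queue marker first */
        if (!bcm_vk_msgq_marker_valid(vk)) {
            dev_info(dev, "msgq marker not ready, deferring sync\n");
            return -EAGAIN;    /* assumed retry convention */
        }

        /* control-block offset inside BAR_1, and total queue count;
         * half of the queues go to-card, half come back to-host */
        msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);
        num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;

        vk->to_v_msg_chan.q_nr = num_q;
        vk->to_h_msg_chan.q_nr = num_q;

        /* per-queue setup elided: each queue's q_start ends up pointing
         * at vk->bar[BAR_1] plus its own start offset */

        atomic_set(&vk->msgq_inited, 1);    /* unblocks bcm_vk_drv_access_ok() */
        return 0;
    }
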
536 static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
543 struct device *dev = &vk->pdev->dev;
544 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
566 (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
575 item_cnt, ib_sgl_size, vk->ib_sgl_size);
584 void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
586 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
589 vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
592 static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
595 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
596 struct device *dev = &vk->pdev->dev;
647 bcm_vk_blk_drv_access(vk);
648 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
679 bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
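
Lines 584-679 cover the host-to-card send: message blocks are copied into the queue's BAR_1 ring, the write index is advanced, and the card is notified by writing to the queue's doorbell register in BAR_0 (line 589). The sketch below condenses that flow; struct bcm_vk_sync_qinfo, the wr_idx/q_size fields, the power-of-two ring assumption, and the use of bcm_vk_drv_access_ok() as the PCIe-down check are all assumptions rather than the driver's exact mechanics.

    /* Condensed sketch of the to-card enqueue path, under the assumptions above. */
    static int to_v_enqueue_sketch(struct bcm_vk *vk, u32 q_num,
                                   const void *blks, u32 nr_blks)
    {
        struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
        struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];
        u32 wr_idx = qinfo->wr_idx;

        /* if the link looks dead, block further access and raise an alert,
         * mirroring the error handling at lines 647-648 */
        if (!bcm_vk_drv_access_ok(vk)) {
            bcm_vk_blk_drv_access(vk);
            bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
            return -EIO;
        }

        /* copy the message blocks into the shared BAR ring (wrap elided) */
        memcpy_toio(qinfo->q_start + (wr_idx << VK_MSGQ_BLK_SZ_SHIFT),
                    blks, nr_blks << VK_MSGQ_BLK_SZ_SHIFT);

        /* ensure the payload is visible before the card is notified */
        wmb();
        qinfo->wr_idx = (wr_idx + nr_blks) & (qinfo->q_size - 1);
        bcm_to_v_q_doorbell(vk, q_num, qinfo->wr_idx);

        return 0;
    }
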
685 int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
690 struct device *dev = &vk->pdev->dev;
697 if (!bcm_vk_msgq_marker_valid(vk)) {
699 vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
716 rc = bcm_to_v_msg_enqueue(vk, entry);
727 static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
731 struct device *dev = &vk->pdev->dev;
737 if (!bcm_vk_drv_access_ok(vk)) {
738 if (vk->reset_pid == pid)
739 vk->reset_pid = 0;
746 if (vk->reset_pid != pid)
747 rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
750 vk->reset_pid = 0;
755 static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
767 bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
775 s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
777 struct device *dev = &vk->pdev->dev;
778 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
830 bcm_vk_blk_drv_access(vk);
831 bcm_vk_set_host_alert(vk,
886 entry = bcm_vk_dequeue_pending(vk,
887 &vk->to_v_msg_chan,
898 bcm_vk_append_pendq(&vk->to_h_msg_chan,
906 test_bit(msg_id, vk->bmap));
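
Lines 775-906 are the response path: for each message pulled off a to-host queue, the matching work entry is looked up on the to-card pending list by its message id (which also releases that id, line 767), and the completed entry is then appended to the to-host pending queue for the reader to collect. A sketch of the match-by-id step follows; chan->pendq, chan->pendq_lock and the wkent node field are assumed names, while get_msg_id() comes from the fragments.

    /* Sketch: pull the work entry with a given msg_id off a channel's
     * pending list (assumed pendq/pendq_lock names). */
    static struct bcm_vk_wkent *
    dequeue_pending_sketch(struct bcm_vk *vk, struct bcm_vk_msg_chan *chan,
                           u16 q_num, u16 msg_id)
    {
        struct bcm_vk_wkent *entry = NULL, *iter;

        spin_lock(&chan->pendq_lock);
        list_for_each_entry(iter, &chan->pendq[q_num], node) {
            if (get_msg_id(&iter->to_v_msg[0]) == msg_id) {
                list_del(&iter->node);
                entry = iter;
                /* the id can be reused once its reply is matched */
                bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
                break;
            }
        }
        spin_unlock(&chan->pendq_lock);

        return entry;    /* NULL if the sender already gave up on it */
    }
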
934 static int bcm_vk_data_init(struct bcm_vk *vk)
938 spin_lock_init(&vk->ctx_lock);
939 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
940 vk->ctx[i].in_use = false;
941 vk->ctx[i].idx = i; /* self identity */
942 vk->ctx[i].miscdev = NULL;
944 spin_lock_init(&vk->msg_id_lock);
945 spin_lock_init(&vk->host_alert_lock);
946 vk->msg_id = 0;
950 INIT_LIST_HEAD(&vk->pid_ht[i].head);
957 struct bcm_vk *vk = dev_id;
959 if (!bcm_vk_drv_access_ok(vk)) {
960 dev_err(&vk->pdev->dev,
965 queue_work(vk->wq_thread, &vk->wq_work);
975 struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
976 struct device *dev = &vk->pdev->dev;
980 ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
1008 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
1010 struct device *dev = &vk->pdev->dev;
1011 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
1016 if (!bcm_vk_drv_access_ok(vk))
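
Lines 1008-1016 guard the read path; the fragments do not show the body, but read presumably hands back a completed entry from the to-host pending queue that belongs to the calling context. The sketch below is therefore more speculative than the others: the pendq/pendq_lock names, the entry->ctx back-pointer, and the to_h_msg/to_h_blks fields are all assumptions.

    /* Speculative sketch of the read-side lookup and copy-out. */
    static ssize_t read_sketch(struct bcm_vk *vk, struct bcm_vk_ctx *ctx,
                               char __user *buf, size_t count)
    {
        struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
        struct bcm_vk_wkent *entry, *found = NULL;
        ssize_t rc = -EAGAIN;
        u32 q_num;

        spin_lock(&chan->pendq_lock);
        for (q_num = 0; q_num < chan->q_nr && !found; q_num++) {
            list_for_each_entry(entry, &chan->pendq[q_num], node) {
                if (entry->ctx == ctx) {    /* response for this open file */
                    list_del(&entry->node);
                    found = entry;
                    break;
                }
            }
        }
        spin_unlock(&chan->pendq_lock);

        if (found) {
            size_t len = found->to_h_blks << VK_MSGQ_BLK_SZ_SHIFT;

            if (len > count)
                len = count;    /* sketch: silently truncate */
            if (copy_to_user(buf, found->to_h_msg, len))
                rc = -EFAULT;
            else
                rc = len;
            kfree(found);
        }

        return rc;
    }
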
1078 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
1081 struct device *dev = &vk->pdev->dev;
1088 if (!bcm_vk_drv_access_ok(vk))
1102 entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
1120 msgq = vk->to_v_msg_chan.msgq[q_num];
1122 if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
1132 rc = bcm_vk_get_msg_id(vk);
1156 if (vk->reset_pid) {
1189 sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
1224 bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);
1226 rc = bcm_to_v_msg_enqueue(vk, entry);
1232 (vk,
1233 &vk->to_v_msg_chan,
1242 bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
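
Lines 1078-1242 are the write path: a work entry is allocated with room for the user message plus in-band SGL blocks (line 1102), a message id is reserved, the entry is parked on the to-card pending list before the doorbell is rung, and on enqueue failure it is pulled back off that list so the id is not leaked. The sketch below isolates that id lifecycle; set_msg_id() and the error codes chosen here are assumptions, and per line 767 the dequeue itself releases the id.

    /* Sketch of the message-id lifecycle around enqueue (error codes assumed). */
    static int write_tail_sketch(struct bcm_vk *vk, struct bcm_vk_wkent *entry,
                                 u32 q_num)
    {
        int rc;

        /* reserve a unique id so the card's reply can be matched later */
        rc = bcm_vk_get_msg_id(vk);
        if (rc == VK_MSG_ID_OVERFLOW)
            return -EOVERFLOW;
        set_msg_id(&entry->to_v_msg[0], rc);

        /* park the entry before ringing the doorbell, so the response
         * handler can always find it, however fast the card replies */
        bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);

        rc = bcm_to_v_msg_enqueue(vk, entry);
        if (rc) {
            /* undo: take it back off the pending list (this also releases
             * the id, line 767) and free it */
            entry = bcm_vk_dequeue_pending(vk, &vk->to_v_msg_chan, q_num,
                                           get_msg_id(&entry->to_v_msg[0]));
            kfree(entry);
            return -EIO;
        }

        return 0;
    }
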
1254 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
1255 struct device *dev = &vk->pdev->dev;
1275 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
1276 struct device *dev = &vk->pdev->dev;
1304 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
1305 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);
1307 ret = bcm_vk_free_ctx(vk, ctx);
1309 ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
1313 kref_put(&vk->kref, bcm_vk_release_data);
1318 int bcm_vk_msg_init(struct bcm_vk *vk)
1320 struct device *dev = &vk->pdev->dev;
1323 if (bcm_vk_data_init(vk)) {
1328 if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
1329 bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
1335 ret = bcm_vk_sync_msgq(vk, false);
1344 void bcm_vk_msg_remove(struct bcm_vk *vk)
1346 bcm_vk_blk_drv_access(vk);
1349 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
1350 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);