Lines matching refs:ism — excerpts from the s390 ISM driver (drivers/s390/net/ism_drv.c). The leading number on each line is its line number in the source file.

7 #define KMSG_COMPONENT "ism"
18 #include "ism.h"
24 #define DRV_NAME "ism"
41 struct mutex mutex; /* protects ism device list */
49 static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
53 spin_lock_irqsave(&ism->lock, flags);
54 ism->subs[client->id] = client;
55 spin_unlock_irqrestore(&ism->lock, flags);
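
Lines 49-55 are nearly the whole of ism_setup_forwarding(); a minimal reconstruction, filling in only the flags local that spin_lock_irqsave() needs:

static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
        unsigned long flags;

        /* Publish the client in the per-device subscriber table; from
         * here on the IRQ and event paths forward to it. */
        spin_lock_irqsave(&ism->lock, flags);
        ism->subs[client->id] = client;
        spin_unlock_irqrestore(&ism->lock, flags);
}
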
60 struct ism_dev *ism;
79 list_for_each_entry(ism, &ism_dev_list.list, list) {
80 ism->priv[i] = NULL;
81 client->add(ism);
82 ism_setup_forwarding(client, ism);
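
Lines 79-82 are the device walk inside ism_register_client(). A sketch of the surrounding function, assuming a clients[] slot table of MAX_CLIENTS entries guarded by a clients_lock mutex and a max_client high-water mark (those names are assumptions, not shown above):

int ism_register_client(struct ism_client *client)
{
        struct ism_dev *ism;
        int i, rc = -ENOSPC;

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        /* Find a free slot; the slot index becomes the client id. */
        for (i = 0; i < MAX_CLIENTS; ++i) {
                if (!clients[i]) {
                        clients[i] = client;
                        client->id = i;
                        if (i == max_client)
                                max_client++;
                        rc = 0;
                        break;
                }
        }
        mutex_unlock(&clients_lock);

        if (i < MAX_CLIENTS) {
                /* Introduce the new client to every device seen so far. */
                list_for_each_entry(ism, &ism_dev_list.list, list) {
                        ism->priv[i] = NULL;
                        client->add(ism);
                        ism_setup_forwarding(client, ism);
                }
        }
        mutex_unlock(&ism_dev_list.mutex);

        return rc;
}
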
93 struct ism_dev *ism;
98 list_for_each_entry(ism, &ism_dev_list.list, list) {
99 spin_lock_irqsave(&ism->lock, flags);
101 ism->subs[client->id] = NULL;
103 if (ism->sba_client_arr[i] == client->id) {
110 spin_unlock_irqrestore(&ism->lock, flags);
122 spin_unlock_irqrestore(&ism->lock, flags);
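
Lines 98-122 show the per-device teardown in ism_unregister_client(): forwarding is switched off first, then the unregister is refused while the client still owns DMBs (the two unlock sites hint at an error path). A sketch under the same assumed names as above:

int ism_unregister_client(struct ism_client *client)
{
        struct ism_dev *ism;
        unsigned long flags;
        int i, rc = 0;

        mutex_lock(&ism_dev_list.mutex);
        list_for_each_entry(ism, &ism_dev_list.list, list) {
                spin_lock_irqsave(&ism->lock, flags);
                /* Cut off IRQ/event forwarding first ... */
                ism->subs[client->id] = NULL;
                /* ... and refuse to unregister while DMBs are owned. */
                for (i = 0; i < ISM_NR_DMBS; ++i) {
                        if (ism->sba_client_arr[i] == client->id)
                                rc = -EBUSY;
                }
                spin_unlock_irqrestore(&ism->lock, flags);
                if (rc)
                        break;
        }
        mutex_unlock(&ism_dev_list.mutex);

        if (!rc) {
                mutex_lock(&clients_lock);
                clients[client->id] = NULL;
                if (client->id + 1 == max_client)
                        max_client--;
                mutex_unlock(&clients_lock);
        }
        return rc;
}
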
128 static int ism_cmd(struct ism_dev *ism, void *cmd)
133 __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
134 __ism_write_cmd(ism, req, 0, sizeof(*req));
138 __ism_read_cmd(ism, resp, 0, sizeof(*resp));
144 __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
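
Lines 128-144 give the command mailbox protocol: the request payload is written first and the header last, since storing the header at offset 0 is what triggers execution; the response is read header-first so the payload is only fetched on success. A sketch, assuming ism_req_hdr/ism_resp_hdr layouts from ism.h:

static int ism_cmd(struct ism_dev *ism, void *cmd)
{
        struct ism_req_hdr *req = cmd;
        struct ism_resp_hdr *resp = cmd;

        /* Payload first, header last: storing the header at offset 0
         * is what kicks off execution on the device. */
        __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
        __ism_write_cmd(ism, req, 0, sizeof(*req));

        /* Header first on the way back; fetch the payload only when
         * the device reports success. */
        __ism_read_cmd(ism, resp, 0, sizeof(*resp));
        if (!resp->ret)
                __ism_read_cmd(ism, resp + 1, sizeof(*resp),
                               resp->len - sizeof(*resp));
        return resp->ret;
}
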
149 static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
157 return ism_cmd(ism, &cmd);
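
ism_cmd_simple() (lines 149-157) is a thin wrapper for commands that carry nothing beyond the header; roughly:

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
        union ism_cmd_simple cmd;   /* assumed: header-only request */

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = cmd_code;
        cmd.request.hdr.len = sizeof(cmd.request);

        return ism_cmd(ism, &cmd);
}
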
160 static int query_info(struct ism_dev *ism)
168 if (ism_cmd(ism, &cmd))
177 static int register_sba(struct ism_dev *ism)
183 sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
193 if (ism_cmd(ism, &cmd)) {
194 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
198 ism->sba = sba;
199 ism->sba_dma_addr = dma_handle;
204 static int register_ieq(struct ism_dev *ism)
210 ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
221 if (ism_cmd(ism, &cmd)) {
222 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
226 ism->ieq = ieq;
227 ism->ieq_idx = -1;
228 ism->ieq_dma_addr = dma_handle;
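
register_sba() (lines 177-199) and register_ieq() (lines 204-228) share one pattern: allocate a DMA-coherent page, hand its address to the device via ism_cmd(), and roll the allocation back if the command fails. A sketch of the SBA half, assuming a union ism_reg_sba command layout from ism.h:

static int register_sba(struct ism_dev *ism)
{
        union ism_reg_sba cmd;      /* assumed command layout */
        dma_addr_t dma_handle;
        struct ism_sba *sba;

        sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
                                 GFP_KERNEL);
        if (!sba)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_SBA;
        cmd.request.hdr.len = sizeof(cmd.request);
        cmd.request.sba = dma_handle;

        /* Roll the allocation back if the device rejects the SBA. */
        if (ism_cmd(ism, &cmd)) {
                dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
                return -EIO;
        }

        ism->sba = sba;
        ism->sba_dma_addr = dma_handle;
        return 0;
}

register_ieq() repeats this with its own command and additionally starts ieq_idx at -1 (line 227), so the first pre-increment in the event loop lands on entry 0.
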
233 static int unregister_sba(struct ism_dev *ism)
237 if (!ism->sba)
240 ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
244 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
245 ism->sba, ism->sba_dma_addr);
247 ism->sba = NULL;
248 ism->sba_dma_addr = 0;
253 static int unregister_ieq(struct ism_dev *ism)
257 if (!ism->ieq)
260 ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
264 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
265 ism->ieq, ism->ieq_dma_addr);
267 ism->ieq = NULL;
268 ism->ieq_dma_addr = 0;
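
The unregister pair (lines 233-268) inverts that order: tell the device first, then free the page and clear the cached pointers. A sketch of the SBA side, assuming ISM_ERROR is the device's generic error code from ism.h:

static int unregister_sba(struct ism_dev *ism)
{
        int ret;

        if (!ism->sba)
                return 0;

        ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
        if (ret && ret != ISM_ERROR)
                return -EIO;

        dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
                          ism->sba, ism->sba_dma_addr);

        ism->sba = NULL;
        ism->sba_dma_addr = 0;
        return 0;
}

unregister_ieq() (lines 253-268) mirrors this for the event queue.
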
273 static int ism_read_local_gid(struct ism_dev *ism)
282 ret = ism_cmd(ism, &cmd);
286 ism->local_gid = cmd.response.gid;
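
ism_read_local_gid() (lines 273-286) is one more command builder, caching the device's own GID; a sketch assuming a union ism_read_gid layout:

static int ism_read_local_gid(struct ism_dev *ism)
{
        union ism_read_gid cmd;     /* assumed layout */
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_READ_GID;
        cmd.request.hdr.len = sizeof(cmd.request);

        ret = ism_cmd(ism, &cmd);
        if (ret)
                return ret;

        ism->local_gid = cmd.response.gid;
        return 0;
}
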
291 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
293 clear_bit(dmb->sba_idx, ism->sba_bitmap);
294 dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
298 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
302 if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
306 bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
314 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
317 dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
322 clear_bit(dmb->sba_idx, ism->sba_bitmap);
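
ism_alloc_dmb() (lines 298-322) pairs a slot in sba_bitmap with a DMA-coherent buffer; either both succeed or both are rolled back. A sketch (the source likely passes stricter GFP flags than plain GFP_KERNEL):

static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        unsigned long bit;

        /* The buffer must fit into a single DMA segment. */
        if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
                return -EINVAL;

        if (!dmb->sba_idx) {
                /* No index requested: take the first free slot above
                 * the reserved offset. */
                bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
                                         ISM_DMB_BIT_OFFSET);
                if (bit == ISM_NR_DMBS)
                        return -ENOSPC;
                dmb->sba_idx = bit;
        }
        /* Reject reserved indices and already-claimed slots. */
        if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
            test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
                return -EINVAL;

        dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
                                           &dmb->dma_addr, GFP_KERNEL);
        if (!dmb->cpu_addr)
                clear_bit(dmb->sba_idx, ism->sba_bitmap);

        return dmb->cpu_addr ? 0 : -ENOMEM;
}
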
327 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
334 ret = ism_alloc_dmb(ism, dmb);
349 ret = ism_cmd(ism, &cmd);
351 ism_free_dmb(ism, dmb);
355 spin_lock_irqsave(&ism->lock, flags);
356 ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
357 spin_unlock_irqrestore(&ism->lock, flags);
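
ism_register_dmb() (lines 327-357) then makes the buffer known to the device and records the owning client so interrupts can be routed. A sketch with the command's field list abbreviated (union ism_reg_dmb is an assumed layout):

int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                     struct ism_client *client)
{
        union ism_reg_dmb cmd;      /* assumed layout */
        unsigned long flags;
        int ret;

        ret = ism_alloc_dmb(ism, dmb);
        if (ret)
                return ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_DMB;
        cmd.request.hdr.len = sizeof(cmd.request);
        cmd.request.dmb = dmb->dma_addr;
        cmd.request.dmb_len = dmb->dmb_len;
        cmd.request.sba_idx = dmb->sba_idx;
        /* vlan and remote-gid fields omitted in this sketch */

        ret = ism_cmd(ism, &cmd);
        if (ret) {
                ism_free_dmb(ism, dmb);
                return ret;
        }
        dmb->dmb_tok = cmd.response.dmb_tok;

        /* Record ownership so IRQs for this DMB reach the client. */
        spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
        spin_unlock_irqrestore(&ism->lock, flags);
        return 0;
}

ism_unregister_dmb() (lines 363-383) runs the same steps backwards: clear the owner under the lock, issue the unregister command, then ism_free_dmb().
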
363 int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
375 spin_lock_irqsave(&ism->lock, flags);
376 ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
377 spin_unlock_irqrestore(&ism->lock, flags);
379 ret = ism_cmd(ism, &cmd);
383 ism_free_dmb(ism, dmb);
389 static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
399 return ism_cmd(ism, &cmd);
402 static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
412 return ism_cmd(ism, &cmd);
421 int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
433 ret = __ism_move(ism, dmb_req, data, bytes);
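
ism_move() (lines 421-433) packs the destination token, DMBE index, signal flag and offset into a single request word for __ism_move(). A sketch, assuming an ISM_CREATE_REQ() helper in ism.h and that the remaining parameters are offset, data and bytes:

int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
             unsigned int offset, void *data, unsigned int bytes)
{
        unsigned long dmb_req;

        /* Pack token, DMBE index, signal flag and offset into the
         * request word that __ism_move() hands to the hardware. */
        dmb_req = ISM_CREATE_REQ(dmb_tok, idx, sf ? 1 : 0, offset);

        return __ism_move(ism, dmb_req, data, bytes);
}
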
472 static void ism_handle_event(struct ism_dev *ism)
478 while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
479 if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
480 ism->ieq_idx = 0;
482 entry = &ism->ieq->entry[ism->ieq_idx];
485 clt = ism->subs[i];
487 clt->handle_event(ism, entry);
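
ism_handle_event() (lines 472-487) drains the IEQ as a ring: it chases the producer index in the queue header and fans each entry out to every subscribed client. A sketch, with max_client assumed as the bound of the client table:

static void ism_handle_event(struct ism_dev *ism)
{
        struct ism_event *entry;
        struct ism_client *clt;
        int i;

        /* Chase the producer index in the queue header; the IEQ is a
         * ring, so wrap at the end of the entry array. */
        while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
                if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
                        ism->ieq_idx = 0;

                entry = &ism->ieq->entry[ism->ieq_idx];
                /* Fan the event out to every subscribed client. */
                for (i = 0; i < max_client; ++i) {
                        clt = ism->subs[i];
                        if (clt)
                                clt->handle_event(ism, entry);
                }
        }
}
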
494 struct ism_dev *ism = data;
500 bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
501 end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
503 spin_lock(&ism->lock);
504 ism->sba->s = 0;
512 dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
513 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
515 client_id = ism->sba_client_arr[bit];
516 if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
518 ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
521 if (ism->sba->e) {
522 ism->sba->e = 0;
524 ism_handle_event(ism);
526 spin_unlock(&ism->lock);
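
The interrupt handler (lines 494-526) scans the DMB bit vector in the SBA, latches and clears each DMBE mask while holding ism->lock, and forwards to the owning client recorded in sba_client_arr; a set e bit additionally drains the event queue. A sketch using the s390 MSB-first bit helpers find_next_bit_inv()/clear_bit_inv() (an assumption about this version):

static irqreturn_t ism_handle_irq(int irq, void *data)
{
        struct ism_dev *ism = data;
        unsigned long bit, end;
        unsigned long *bv;
        u16 dmbemask;
        u8 client_id;

        bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
        end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

        spin_lock(&ism->lock);
        ism->sba->s = 0;        /* clear the summary bit to re-arm */
        for (bit = 0;;) {
                bit = find_next_bit_inv(bv, end, bit);
                if (bit >= end)
                        break;
                clear_bit_inv(bit, bv);

                /* Latch and clear the DMBE mask before forwarding. */
                dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
                ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;

                client_id = ism->sba_client_arr[bit];
                if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
                        continue;
                ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET,
                                                 dmbemask);
        }

        if (ism->sba->e) {
                ism->sba->e = 0;        /* event indicator: drain the IEQ */
                ism_handle_event(ism);
        }
        spin_unlock(&ism->lock);
        return IRQ_HANDLED;
}
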
530 static int ism_dev_init(struct ism_dev *ism)
532 struct pci_dev *pdev = ism->pdev;
539 ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
540 if (!ism->sba_client_arr)
542 memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
545 pci_name(pdev), ism);
549 ret = register_sba(ism);
553 ret = register_ieq(ism);
557 ret = ism_read_local_gid(ism);
561 if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
569 clients[i]->add(ism);
570 ism_setup_forwarding(clients[i], ism);
575 list_add(&ism->list, &ism_dev_list.list);
578 query_info(ism);
582 unregister_ieq(ism);
584 unregister_sba(ism);
586 free_irq(pci_irq_vector(pdev, 0), ism);
588 kfree(ism->sba_client_arr);
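
ism_dev_init() (lines 530-588) wires everything up in a fixed order, and the error path unwinds it in reverse. A condensed sketch; the label names, the ism_v2_capable flag and the locking around the client table are assumptions:

static int ism_dev_init(struct ism_dev *ism)
{
        struct pci_dev *pdev = ism->pdev;
        int i, ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (ret <= 0)
                return -EIO;

        ret = -ENOMEM;
        ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
        if (!ism->sba_client_arr)
                goto free_vectors;
        memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

        ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
                          pci_name(pdev), ism);
        if (ret)
                goto free_client_arr;

        ret = register_sba(ism);
        if (ret)
                goto free_irq;
        ret = register_ieq(ism);
        if (ret)
                goto unreg_sba;
        ret = ism_read_local_gid(ism);
        if (ret)
                goto unreg_ieq;

        /* Probing the reserved VLAN id doubles as a capability check. */
        ism_v2_capable = !ism_add_vlan_id(ism, ISM_RESERVED_VLANID);

        /* Hook up already-registered clients, then publish the device. */
        mutex_lock(&ism_dev_list.mutex);
        for (i = 0; i < max_client; ++i) {
                if (clients[i]) {
                        clients[i]->add(ism);
                        ism_setup_forwarding(clients[i], ism);
                }
        }
        list_add(&ism->list, &ism_dev_list.list);
        mutex_unlock(&ism_dev_list.mutex);

        query_info(ism);
        return 0;

unreg_ieq:
        unregister_ieq(ism);
unreg_sba:
        unregister_sba(ism);
free_irq:
        free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
        kfree(ism->sba_client_arr);
free_vectors:
        pci_free_irq_vectors(pdev);
        return ret;
}
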
597 struct ism_dev *ism;
600 ism = kzalloc(sizeof(*ism), GFP_KERNEL);
601 if (!ism)
604 spin_lock_init(&ism->lock);
605 dev_set_drvdata(&pdev->dev, ism);
606 ism->pdev = pdev;
607 ism->dev.parent = &pdev->dev;
608 device_initialize(&ism->dev);
609 dev_set_name(&ism->dev, dev_name(&pdev->dev));
610 ret = device_add(&ism->dev);
630 ret = ism_dev_init(ism);
641 device_del(&ism->dev);
644 kfree(ism);
649 static void ism_dev_exit(struct ism_dev *ism)
651 struct pci_dev *pdev = ism->pdev;
655 spin_lock_irqsave(&ism->lock, flags);
657 ism->subs[i] = NULL;
658 spin_unlock_irqrestore(&ism->lock, flags);
664 clients[i]->remove(ism);
670 ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
671 unregister_ieq(ism);
672 unregister_sba(ism);
673 free_irq(pci_irq_vector(pdev, 0), ism);
674 kfree(ism->sba_client_arr);
676 list_del_init(&ism->list);
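
ism_dev_exit() (lines 649-676) is the mirror image: forwarding is cut off under the lock before clients get their remove() callback, then teardown runs in the reverse of init order. A sketch under the same assumptions as the init sketch:

static void ism_dev_exit(struct ism_dev *ism)
{
        struct pci_dev *pdev = ism->pdev;
        unsigned long flags;
        int i;

        /* Stop forwarding before clients get their remove() callback. */
        spin_lock_irqsave(&ism->lock, flags);
        for (i = 0; i < max_client; ++i)
                ism->subs[i] = NULL;
        spin_unlock_irqrestore(&ism->lock, flags);

        mutex_lock(&ism_dev_list.mutex);
        for (i = 0; i < max_client; ++i) {
                if (clients[i])
                        clients[i]->remove(ism);
        }

        /* Tear down in the reverse of the ism_dev_init() order. */
        if (ism_v2_capable)
                ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
        unregister_ieq(ism);
        unregister_sba(ism);
        free_irq(pci_irq_vector(pdev, 0), ism);
        kfree(ism->sba_client_arr);
        pci_free_irq_vectors(pdev);
        list_del_init(&ism->list);
        mutex_unlock(&ism_dev_list.mutex);
}
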
682 struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
684 ism_dev_exit(ism);
688 device_del(&ism->dev);
690 kfree(ism);
704 ism_debug_info = debug_register("ism", 2, 1, 16);
730 static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
743 return ism_cmd(ism, &cmd);
783 static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
797 return ism_cmd(ism, &cmd);
819 static u64 ism_get_local_gid(struct ism_dev *ism)
821 return ism->local_gid;
829 static u16 ism_get_chid(struct ism_dev *ism)
831 if (!ism || !ism->pdev)
834 return to_zpci(ism->pdev)->pchid;
844 struct ism_dev *ism = dev->priv;
846 return &ism->dev;