Lines Matching refs:dev (drivers/scsi/aacraid/src.c)

34 static int aac_src_get_sync_status(struct aac_dev *dev);
39 struct aac_dev *dev;
46 dev = ctx->dev;
49 if (dev->msi_enabled) {
52 bellbits = src_readl(dev, MUnit.ODR_MSI);
60 bellbits = src_readl(dev, MUnit.ODR_R);
63 src_writel(dev, MUnit.ODR_C, bellbits);
64 src_readl(dev, MUnit.ODR_C);
67 src_writel(dev, MUnit.ODR_C, bellbits);
68 src_readl(dev, MUnit.ODR_C);
83 if (!aac_sync_mode && !dev->msi_enabled) {
84 src_writel(dev, MUnit.ODR_C, bellbits);
85 src_readl(dev, MUnit.ODR_C);
88 if (dev->sync_fib) {
89 if (dev->sync_fib->callback)
90 dev->sync_fib->callback(dev->sync_fib->callback_data,
91 dev->sync_fib);
92 spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
93 if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
94 dev->management_fib_count--;
95 complete(&dev->sync_fib->event_wait);
97 spin_unlock_irqrestore(&dev->sync_fib->event_lock,
99 spin_lock_irqsave(&dev->sync_lock, sflags);
100 if (!list_empty(&dev->sync_fib_list)) {
101 entry = dev->sync_fib_list.next;
102 dev->sync_fib = list_entry(entry,
108 dev->sync_fib = NULL;
110 spin_unlock_irqrestore(&dev->sync_lock, sflags);
112 aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
113 (u32)dev->sync_fib->hw_fib_pa,
118 if (!dev->msi_enabled)
125 if (dev->sa_firmware) {
126 u32 events = src_readl(dev, MUnit.SCR0);
128 aac_intr_normal(dev, events, 1, 0, NULL);
129 writel(events, &dev->IndexRegs->Mailbox[0]);
130 src_writel(dev, MUnit.IDR, 1 << 23);
132 if (dev->aif_thread && dev->fsa_dev)
133 aac_intr_normal(dev, 0, 2, 0, NULL);
135 if (dev->msi_enabled)
136 aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
141 index = dev->host_rrq_idx[vector_no];
146 handle = le32_to_cpu((dev->host_rrq[index])
155 if (dev->msi_enabled && dev->max_msix > 1)
156 atomic_dec(&dev->rrq_outstanding[vector_no]);
157 aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
158 dev->host_rrq[index++] = 0;
159 if (index == (vector_no + 1) * dev->vector_cap)
160 index = vector_no * dev->vector_cap;
161 dev->host_rrq_idx[vector_no] = index;
171 * @dev: Adapter
174 static void aac_src_disable_interrupt(struct aac_dev *dev)
176 src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
181 * @dev: Adapter
184 static void aac_src_enable_interrupt_message(struct aac_dev *dev)
186 aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
191 * @dev: Adapter
209 static int src_sync_cmd(struct aac_dev *dev, u32 command,
220 writel(command, &dev->IndexRegs->Mailbox[0]);
224 writel(p1, &dev->IndexRegs->Mailbox[1]);
225 writel(p2, &dev->IndexRegs->Mailbox[2]);
226 writel(p3, &dev->IndexRegs->Mailbox[3]);
227 writel(p4, &dev->IndexRegs->Mailbox[4]);
232 if (!dev->msi_enabled)
233 src_writel(dev,
240 src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
246 src_readl(dev, MUnit.OIMR);
251 src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
253 if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
254 !dev->in_soft_reset) {
270 if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
274 if (dev->msi_enabled)
275 aac_src_access_devreg(dev,
278 src_writel(dev,
293 aac_adapter_enable_int(dev);
300 *status = readl(&dev->IndexRegs->Mailbox[0]);
302 *r1 = readl(&dev->IndexRegs->Mailbox[1]);
304 *r2 = readl(&dev->IndexRegs->Mailbox[2]);
306 *r3 = readl(&dev->IndexRegs->Mailbox[3]);
308 *r4 = readl(&dev->IndexRegs->Mailbox[4]);
310 dev->max_msix =
311 readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
315 if (!dev->msi_enabled)
316 src_writel(dev,
324 aac_adapter_enable_int(dev);
330 * @dev: Adapter
335 static void aac_src_interrupt_adapter(struct aac_dev *dev)
337 src_sync_cmd(dev, BREAKPOINT_REQUEST,
344 * @dev: Adapter
351 static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
356 src_writel(dev, MUnit.ODR_C,
360 src_writel(dev, MUnit.ODR_C,
364 src_writel(dev, MUnit.ODR_C,
368 src_writel(dev, MUnit.ODR_C,
372 src_writel(dev, MUnit.ODR_C,
376 src_writel(dev, MUnit.ODR_C,
387 * @dev: Adapter
392 static void aac_src_start_adapter(struct aac_dev *dev)
398 for (i = 0; i < dev->max_msix; i++) {
399 dev->host_rrq_idx[i] = i * dev->vector_cap;
400 atomic_set(&dev->rrq_outstanding[i], 0);
402 atomic_set(&dev->msix_counter, 0);
403 dev->fibs_pushed_no = 0;
405 init = dev->init;
406 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
409 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
410 lower_32_bits(dev->init_pa),
411 upper_32_bits(dev->init_pa),
419 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
420 (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
428 * @dev: device to check if healthy
433 static int aac_src_check_health(struct aac_dev *dev)
435 u32 status = src_readl(dev, MUnit.OMR);
472 static inline u32 aac_get_vector(struct aac_dev *dev)
474 return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
485 struct aac_dev *dev = fib->dev;
486 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
502 if (dev->msi_enabled && dev->max_msix > 1 &&
505 if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
506 && dev->sa_firmware)
507 vector_no = aac_get_vector(dev);
548 atomic_inc(&dev->rrq_outstanding[vector_no]);
557 src_writeq(dev, MUnit.IQN_L, (u64)address);
559 spin_lock_irqsave(&fib->dev->iq_lock, flags);
560 src_writel(dev, MUnit.IQN_H,
562 src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
563 spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
566 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
567 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
600 src_writeq(dev, MUnit.IQ_L, (u64)address);
602 spin_lock_irqsave(&fib->dev->iq_lock, flags);
603 src_writel(dev, MUnit.IQ_H,
605 src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
606 spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
614 * @dev: device ioremap
618 static int aac_src_ioremap(struct aac_dev *dev, u32 size)
621 iounmap(dev->regs.src.bar1);
622 dev->regs.src.bar1 = NULL;
623 iounmap(dev->regs.src.bar0);
624 dev->base = dev->regs.src.bar0 = NULL;
627 dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
629 dev->base = NULL;
630 if (dev->regs.src.bar1 == NULL)
632 dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
633 if (dev->base == NULL) {
634 iounmap(dev->regs.src.bar1);
635 dev->regs.src.bar1 = NULL;
638 dev->IndexRegs = &((struct src_registers __iomem *)
639 dev->base)->u.tupelo.IndexRegs;
645 * @dev: device ioremap
649 static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
652 iounmap(dev->regs.src.bar0);
653 dev->base = dev->regs.src.bar0 = NULL;
657 dev->regs.src.bar1 =
658 ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
659 dev->base = NULL;
660 if (dev->regs.src.bar1 == NULL)
662 dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
663 if (dev->base == NULL) {
664 iounmap(dev->regs.src.bar1);
665 dev->regs.src.bar1 = NULL;
668 dev->IndexRegs = &((struct src_registers __iomem *)
669 dev->base)->u.denali.IndexRegs;
673 void aac_set_intx_mode(struct aac_dev *dev)
675 if (dev->msi_enabled) {
676 aac_src_access_devreg(dev, AAC_ENABLE_INTX);
677 dev->msi_enabled = 0;
682 static void aac_clear_omr(struct aac_dev *dev)
686 omr_value = src_readl(dev, MUnit.OMR);
697 src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
698 src_readl(dev, MUnit.OMR);
701 static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
708 supported_options3 = dev->supplement_adapter_info.supported_options3;
712 aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
716 static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
725 status = src_readl(dev, MUnit.OMR);
747 static void aac_src_drop_io(struct aac_dev *dev)
749 if (!dev->soft_reset_support)
752 aac_adapter_sync_cmd(dev, DROP_IO,
756 static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
758 aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
760 aac_src_drop_io(dev);
763 static void aac_send_iop_reset(struct aac_dev *dev)
765 aac_dump_fw_fib_iop_reset(dev);
767 aac_notify_fw_of_iop_reset(dev);
769 aac_set_intx_mode(dev);
771 aac_clear_omr(dev);
773 src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
778 static void aac_send_hardware_soft_reset(struct aac_dev *dev)
782 aac_clear_omr(dev);
783 val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
785 writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
789 static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
798 dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
803 if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
806 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
808 dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
811 dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
812 aac_send_iop_reset(dev);
817 is_ctrl_up = aac_is_ctrl_up_and_running(dev);
819 dev_err(&dev->pdev->dev, "IOP reset failed\n");
821 dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
826 if (!dev->sa_firmware) {
827 dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
833 dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
834 aac_send_hardware_soft_reset(dev);
835 dev->msi_enabled = 0;
837 is_ctrl_up = aac_is_ctrl_up_and_running(dev);
839 dev_err(&dev->pdev->dev, "SOFT reset failed\n");
843 dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
854 if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
861 * @dev: Adapter
864 static int aac_src_select_comm(struct aac_dev *dev, int comm)
868 dev->a_ops.adapter_intr = aac_src_intr_message;
869 dev->a_ops.adapter_deliver = aac_src_deliver_message;
879 * @dev: device to configure
883 int aac_src_init(struct aac_dev *dev)
888 int instance = dev->id;
889 const char *name = dev->name;
891 dev->a_ops.adapter_ioremap = aac_src_ioremap;
892 dev->a_ops.adapter_comm = aac_src_select_comm;
894 dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
895 if (aac_adapter_ioremap(dev, dev->base_size)) {
901 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
902 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
904 if (dev->init_reset) {
905 dev->init_reset = false;
906 if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
913 status = src_readl(dev, MUnit.OMR);
915 if (aac_src_restart_adapter(dev,
916 aac_src_check_health(dev), IOP_HWSOFT_RESET))
923 status = src_readl(dev, MUnit.OMR);
926 dev->name, instance);
934 dev->name, instance);
941 while (!((status = src_readl(dev, MUnit.OMR)) &
947 dev->name, instance, status);
956 if (likely(!aac_src_restart_adapter(dev,
957 aac_src_check_health(dev), IOP_HWSOFT_RESET)))
968 dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
969 dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
970 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
971 dev->a_ops.adapter_notify = aac_src_notify_adapter;
972 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
973 dev->a_ops.adapter_check_health = aac_src_check_health;
974 dev->a_ops.adapter_restart = aac_src_restart_adapter;
975 dev->a_ops.adapter_start = aac_src_start_adapter;
981 aac_adapter_comm(dev, AAC_COMM_MESSAGE);
982 aac_adapter_disable_int(dev);
983 src_writel(dev, MUnit.ODR_C, 0xffffffff);
984 aac_adapter_enable_int(dev);
986 if (aac_init_adapter(dev) == NULL)
988 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
991 dev->msi = !pci_enable_msi(dev->pdev);
993 dev->aac_msix[0].vector_no = 0;
994 dev->aac_msix[0].dev = dev;
996 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
997 IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {
999 if (dev->msi)
1000 pci_disable_msi(dev->pdev);
1006 dev->dbg_base = pci_resource_start(dev->pdev, 2);
1007 dev->dbg_base_mapped = dev->regs.src.bar1;
1008 dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
1009 dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
1011 aac_adapter_enable_int(dev);
1013 if (!dev->sync_mode) {
1018 aac_src_start_adapter(dev);
1027 static int aac_src_wait_sync(struct aac_dev *dev, int *status)
1044 if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
1048 if (dev->msi_enabled)
1049 aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
1051 src_writel(dev, MUnit.ODR_C,
1068 status[0] = readl(&dev->IndexRegs->Mailbox[0]);
1069 status[1] = readl(&dev->IndexRegs->Mailbox[1]);
1070 status[2] = readl(&dev->IndexRegs->Mailbox[2]);
1071 status[3] = readl(&dev->IndexRegs->Mailbox[3]);
1072 status[4] = readl(&dev->IndexRegs->Mailbox[4]);
1086 * @dev: device to configure
1090 static int aac_src_soft_reset(struct aac_dev *dev)
1092 u32 status_omr = src_readl(dev, MUnit.OMR);
1114 dev->in_soft_reset = 1;
1115 dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;
1118 rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
1124 if (aac_src_wait_sync(dev, status)) {
1138 dev->sa_firmware = 1;
1141 rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
1148 if (aac_src_wait_sync(dev, status)) {
1154 dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
1158 rc = aac_src_check_health(dev);
1161 dev->in_soft_reset = 0;
1162 dev->msi_enabled = 0;
1164 dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
1171 * @dev: device to configure
1175 int aac_srcv_init(struct aac_dev *dev)
1180 int instance = dev->id;
1181 const char *name = dev->name;
1183 dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
1184 dev->a_ops.adapter_comm = aac_src_select_comm;
1186 dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
1187 if (aac_adapter_ioremap(dev, dev->base_size)) {
1193 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1194 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1196 if (dev->init_reset) {
1197 dev->init_reset = false;
1198 if (aac_src_soft_reset(dev)) {
1199 aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
1208 status = src_readl(dev, MUnit.OMR);
1212 status = src_readl(dev, MUnit.OMR);
1215 dev->name, instance);
1229 status = src_readl(dev, MUnit.OMR);
1231 if (aac_src_restart_adapter(dev,
1232 aac_src_check_health(dev), IOP_HWSOFT_RESET))
1239 status = src_readl(dev, MUnit.OMR);
1241 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
1248 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
1257 status = src_readl(dev, MUnit.OMR);
1265 dev->name, instance, status);
1274 if (likely(!aac_src_restart_adapter(dev,
1275 aac_src_check_health(dev), IOP_HWSOFT_RESET)))
1287 dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
1288 dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
1289 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1290 dev->a_ops.adapter_notify = aac_src_notify_adapter;
1291 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1292 dev->a_ops.adapter_check_health = aac_src_check_health;
1293 dev->a_ops.adapter_restart = aac_src_restart_adapter;
1294 dev->a_ops.adapter_start = aac_src_start_adapter;
1300 aac_adapter_comm(dev, AAC_COMM_MESSAGE);
1301 aac_adapter_disable_int(dev);
1302 src_writel(dev, MUnit.ODR_C, 0xffffffff);
1303 aac_adapter_enable_int(dev);
1305 if (aac_init_adapter(dev) == NULL)
1307 if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
1308 (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
1310 if (dev->msi_enabled)
1311 aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
1313 if (aac_acquire_irq(dev))
1316 dev->dbg_base = pci_resource_start(dev->pdev, 2);
1317 dev->dbg_base_mapped = dev->regs.src.bar1;
1318 dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
1319 dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
1321 aac_adapter_enable_int(dev);
1323 if (!dev->sync_mode) {
1328 aac_src_start_adapter(dev);
1337 void aac_src_access_devreg(struct aac_dev *dev, int mode)
1343 src_writel(dev,
1345 dev->OIMR = (dev->msi_enabled ?
1351 src_writel(dev,
1353 dev->OIMR = AAC_INT_DISABLE_ALL);
1358 val = src_readl(dev, MUnit.IDR);
1360 src_writel(dev, MUnit.IDR, val);
1361 src_readl(dev, MUnit.IDR);
1364 src_writel(dev, MUnit.IOAR, val);
1365 val = src_readl(dev, MUnit.OIMR);
1366 src_writel(dev,
1373 val = src_readl(dev, MUnit.IDR);
1375 src_writel(dev, MUnit.IDR, val);
1376 src_readl(dev, MUnit.IDR);
1381 val = src_readl(dev, MUnit.IDR);
1383 src_writel(dev, MUnit.IDR, val);
1384 src_readl(dev, MUnit.IDR);
1389 val = src_readl(dev, MUnit.IDR);
1391 src_writel(dev, MUnit.IDR, val);
1392 src_readl(dev, MUnit.IDR);
1397 val = src_readl(dev, MUnit.IDR);
1399 src_writel(dev, MUnit.IDR, val);
1400 src_readl(dev, MUnit.IDR);
1403 src_writel(dev, MUnit.IOAR, val);
1404 src_readl(dev, MUnit.IOAR);
1405 val = src_readl(dev, MUnit.OIMR);
1406 src_writel(dev, MUnit.OIMR,
1415 static int aac_src_get_sync_status(struct aac_dev *dev)
1420 msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
1422 if (!dev->msi_enabled) {
1429 legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
1431 dev->msi_enabled = 1;
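
The interrupt-path fragments around source lines 141-161 consume a per-vector window of the host response ring: read the entry at host_rrq_idx[vector], release the slot, advance the index, and wrap back to the start of that vector's window of vector_cap slots. Below is a minimal standalone sketch of that indexing pattern, not the driver's code; the struct, the bit layout of an entry, and the drain_vector() helper are all hypothetical.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_VECTORS 2
#define VECTOR_CAP  4

struct rrq_state {
	uint32_t host_rrq[MAX_VECTORS * VECTOR_CAP];  /* response ring slots  */
	uint32_t host_rrq_idx[MAX_VECTORS];           /* next slot per vector */
};

/* Drain every completion currently posted for one vector. */
static void drain_vector(struct rrq_state *s, unsigned int vector)
{
	uint32_t index = s->host_rrq_idx[vector];

	while (s->host_rrq[index] != 0) {
		uint32_t entry = s->host_rrq[index];
		/* In this sketch, bit 31 marks a fast response and the
		 * low bits carry the completion handle. */
		uint32_t handle = entry & 0x7fffffffu;
		int fast = !!(entry & 0x80000000u);

		printf("vector %u: handle %u fast=%d\n", vector, handle, fast);

		s->host_rrq[index++] = 0;                /* release the slot  */
		if (index == (vector + 1) * VECTOR_CAP)  /* wrap inside this  */
			index = vector * VECTOR_CAP;     /* vector's window   */
	}
	s->host_rrq_idx[vector] = index;
}

int main(void)
{
	struct rrq_state s;

	memset(&s, 0, sizeof(s));
	s.host_rrq_idx[1] = 1 * VECTOR_CAP;   /* vector 1 owns slots 4..7 */
	s.host_rrq[4] = 0x80000007u;          /* handle 7, fast response  */
	s.host_rrq[5] = 0x00000008u;          /* handle 8, normal         */
	drain_vector(&s, 1);
	return 0;
}
```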
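src_sync_cmd() and aac_src_wait_sync() (source lines 209-324 and 1027-1072) follow a mailbox-plus-doorbell handshake: load the command and parameters into the Mailbox registers, ring the inbound doorbell, poll the outbound doorbell for completion, acknowledge it, and read the status back out of the mailboxes. The sketch below shows only that handshake shape against a fake register block; struct fake_regs, the mmio_read32()/mmio_write32() helpers, the opcode, and the bit positions are assumptions, not the controller's real programming interface.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register block standing in for the ioremap'd BAR. */
struct fake_regs {
	uint32_t mailbox[6];
	uint32_t inbound_doorbell;
	uint32_t outbound_doorbell;
};

/* In the driver these would be readl()/writel() on MMIO registers. */
static uint32_t mmio_read32(volatile uint32_t *reg)              { return *reg; }
static void     mmio_write32(volatile uint32_t *reg, uint32_t v) { *reg = v; }

static int sync_cmd(struct fake_regs *r, uint32_t command,
		    uint32_t p1, uint32_t p2, uint32_t *status)
{
	/* 1. Load the command and its parameters into the mailboxes. */
	mmio_write32(&r->mailbox[0], command);
	mmio_write32(&r->mailbox[1], p1);
	mmio_write32(&r->mailbox[2], p2);

	/* 2. Ring the inbound doorbell so the controller picks it up. */
	mmio_write32(&r->inbound_doorbell, 1u << 0);

	/* 3. Poll the outbound doorbell for completion, with a bound
	 *    so a dead controller cannot hang us forever. */
	for (int i = 0; i < 1000000; i++) {
		if (mmio_read32(&r->outbound_doorbell) & 1u) {
			/* 4. Acknowledge (write-1-to-clear on real hardware)
			 *    and read the result out of the mailbox. */
			mmio_write32(&r->outbound_doorbell, 1u);
			*status = mmio_read32(&r->mailbox[0]);
			return 0;
		}
	}
	return -1;	/* timed out */
}

int main(void)
{
	struct fake_regs regs = { { 0 } };
	uint32_t status = 0;

	/* No controller behind this struct: pre-set the completion bit so
	 * the poll succeeds; the status read then just echoes mailbox[0]. */
	regs.outbound_doorbell = 1u;

	if (sync_cmd(&regs, 0x23 /* arbitrary opcode */, 0, 0, &status) == 0)
		printf("status = 0x%x\n", status);
	return 0;
}
```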
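aac_get_vector() (source lines 472-474) spreads deliverable FIBs across MSI-X vectors with an atomic counter taken modulo max_msix, and the deliver and interrupt paths bracket each request with an increment and decrement of rrq_outstanding[vector] (source lines 502-548 and 155-156). A minimal user-space sketch of the same round-robin pattern follows, using C11 atomics; struct ctrl, pick_vector(), submit(), and finish() are made-up names, not driver API.

```c
#include <stdatomic.h>
#include <stdio.h>

#define MAX_VECTORS 8

struct ctrl {
	atomic_uint msix_counter;                  /* round-robin cursor       */
	unsigned int max_msix;                     /* vectors actually in use  */
	atomic_uint rrq_outstanding[MAX_VECTORS];  /* in-flight count / vector */
};

/* Increment-and-wrap, mirroring atomic_inc_return(...) % max_msix. */
static unsigned int pick_vector(struct ctrl *c)
{
	return (atomic_fetch_add(&c->msix_counter, 1) + 1) % c->max_msix;
}

static unsigned int submit(struct ctrl *c)
{
	unsigned int v = pick_vector(c);

	atomic_fetch_add(&c->rrq_outstanding[v], 1);  /* one more in flight */
	return v;
}

static void finish(struct ctrl *c, unsigned int v)
{
	atomic_fetch_sub(&c->rrq_outstanding[v], 1);  /* completed on v */
}

int main(void)
{
	struct ctrl c = { .max_msix = 4 };

	for (int i = 0; i < 6; i++)
		printf("request %d -> vector %u\n", i, submit(&c));
	finish(&c, 1);
	return 0;
}
```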