Lines matching refs: ohci (line-numbered excerpts from the Linux FireWire OHCI controller driver)
42 #include "ohci.h"
44 #define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
45 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
46 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
88 struct fw_ohci *ohci;
118 struct fw_ohci *ohci;
291 static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
293 return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
323 #define has_reboot_by_cycle_timer_read_quirk(ohci) false
411 static void log_irqs(struct fw_ohci *ohci, u32 evt)
421 ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
460 static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
467 ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
468 self_id_count, generation, ohci->node_id);
470 for (s = ohci->self_id_buffer; self_id_count--; ++s)
472 ohci_notice(ohci,
479 ohci_notice(ohci,
516 static void log_ar_at_event(struct fw_ohci *ohci,
529 ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
549 ohci_notice(ohci, "A%c %s, %s\n",
553 ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
557 ohci_notice(ohci,
564 ohci_notice(ohci,
572 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
574 writel(data, ohci->registers + offset);
577 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
579 return readl(ohci->registers + offset);
582 static inline void flush_writes(const struct fw_ohci *ohci)
585 reg_read(ohci, OHCI1394_Version);
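
reg_write()/reg_read() are thin MMIO accessors over ohci->registers, and flush_writes() reads back a harmless register (OHCI1394_Version), the usual way of flushing posted PCI writes out to the chip. Below is a minimal user-space analogue of that posting-flush idiom, with a volatile pointer standing in for the kernel's writel()/readl(); all names here are illustrative, not taken from the driver.

#include <stdint.h>

/* Hypothetical register block; in the driver this is an ioremap()ed
 * MMIO region accessed with writel()/readl(). */
struct mmio_block {
	volatile uint32_t *base;
};

static inline void mmio_write32(struct mmio_block *r, int offset, uint32_t data)
{
	r->base[offset / 4] = data;          /* write may be posted on the bus */
}

static inline uint32_t mmio_read32(struct mmio_block *r, int offset)
{
	return r->base[offset / 4];          /* read completes before returning */
}

/* Reading any device register forces earlier posted writes to land first. */
static inline void mmio_flush_posted(struct mmio_block *r)
{
	(void)mmio_read32(r, 0 /* e.g. a version register */);
}
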
590 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
594 static int read_phy_reg(struct fw_ohci *ohci, int addr)
599 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
601 val = reg_read(ohci, OHCI1394_PhyControl);
615 ohci_err(ohci, "failed to read phy reg %d\n", addr);
621 static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
625 reg_write(ohci, OHCI1394_PhyControl,
628 val = reg_read(ohci, OHCI1394_PhyControl);
638 ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
644 static int update_phy_reg(struct fw_ohci *ohci, int addr,
647 int ret = read_phy_reg(ohci, addr);
658 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
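
update_phy_reg() is the standard read-modify-write with separate clear and set masks, as the expression on line 658 shows: the clear bits are knocked out of the value read back, then the set bits are OR'ed in before writing. The core arithmetic in isolation, with an illustrative helper name:

#include <stdint.h>

/* Combine an old register value with clear/set masks; clear applies first. */
static uint32_t apply_clear_set(uint32_t old, uint32_t clear_bits, uint32_t set_bits)
{
	return (old & ~clear_bits) | set_bits;
}

/* Example: apply_clear_set(0x5a, 0x0f, 0x03) == 0x53 */
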
661 static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
665 ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
669 return read_phy_reg(ohci, addr);
674 struct fw_ohci *ohci = fw_ohci(card);
677 mutex_lock(&ohci->phy_reg_mutex);
678 ret = read_phy_reg(ohci, addr);
679 mutex_unlock(&ohci->phy_reg_mutex);
687 struct fw_ohci *ohci = fw_ohci(card);
690 mutex_lock(&ohci->phy_reg_mutex);
691 ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
692 mutex_unlock(&ohci->phy_reg_mutex);
717 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
722 struct device *dev = ctx->ohci->card.device;
739 struct fw_ohci *ohci = ctx->ohci;
741 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
742 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
743 flush_writes(ohci);
745 ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
827 dma_sync_single_for_cpu(ctx->ohci->card.device,
833 dma_sync_single_for_cpu(ctx->ohci->card.device,
840 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
847 struct fw_ohci *ohci = ctx->ohci;
906 p.generation = ohci->request_generation;
908 log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
932 if (!(ohci->quirks & QUIRK_RESET_PACKET))
933 ohci->request_generation = (p.header[2] >> 16) & 0xff;
934 } else if (ctx == &ohci->ar_request_ctx) {
935 fw_core_handle_request(&ohci->card, &p);
937 fw_core_handle_response(&ohci->card, &p);
963 dma_sync_single_for_device(ctx->ohci->card.device,
1017 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
1020 struct device *dev = ohci->card.device;
1027 ctx->ohci = ohci;
1048 ctx->descriptors = ohci->misc_buffer + descriptors_offset;
1049 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
1079 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
1080 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
1131 spin_lock_irqsave(&ctx->ohci->lock, flags);
1133 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1141 * context. Must be called with ohci->lock held.
1156 desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
1177 static int context_init(struct context *ctx, struct fw_ohci *ohci,
1180 ctx->ohci = ohci;
1212 struct fw_card *card = &ctx->ohci->card;
1221 /* Must be called with ohci->lock held */
1255 struct fw_ohci *ohci = ctx->ohci;
1257 reg_write(ohci, COMMAND_PTR(ctx->regs),
1259 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
1260 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
1262 flush_writes(ohci);
1290 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
1303 struct fw_ohci *ohci = ctx->ohci;
1307 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1311 reg = reg_read(ohci, CONTROL_SET(ctx->regs));
1318 ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
1334 struct fw_ohci *ohci = ctx->ohci;
1413 payload_bus = dma_map_single(ohci->card.device,
1417 if (dma_mapping_error(ohci->card.device, payload_bus)) {
1443 if (ohci->generation != packet->generation) {
1445 dma_unmap_single(ohci->card.device, payload_bus,
1454 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
1478 struct fw_ohci *ohci = context->ohci;
1492 dma_unmap_single(ohci->card.device, packet->payload_bus,
1498 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
1548 packet->callback(packet, &ohci->card, packet->ack);
1559 static void handle_local_rom(struct fw_ohci *ohci,
1580 (void *) ohci->config_rom + i, length);
1583 fw_core_handle_response(&ohci->card, &response);
1586 static void handle_local_lock(struct fw_ohci *ohci,
1613 reg_write(ohci, OHCI1394_CSRData, lock_data);
1614 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1615 reg_write(ohci, OHCI1394_CSRControl, sel);
1618 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1619 lock_old = cpu_to_be32(reg_read(ohci,
1627 ohci_err(ohci, "swap not done (CSR lock timeout)\n");
1631 fw_core_handle_response(&ohci->card, &response);
1638 if (ctx == &ctx->ohci->at_request_ctx) {
1640 packet->callback(packet, &ctx->ohci->card, packet->ack);
1651 handle_local_rom(ctx->ohci, packet, csr);
1657 handle_local_lock(ctx->ohci, packet, csr);
1660 if (ctx == &ctx->ohci->at_request_ctx)
1661 fw_core_handle_request(&ctx->ohci->card, packet);
1663 fw_core_handle_response(&ctx->ohci->card, packet);
1667 if (ctx == &ctx->ohci->at_response_ctx) {
1669 packet->callback(packet, &ctx->ohci->card, packet->ack);
1673 static u32 get_cycle_time(struct fw_ohci *ohci);
1680 spin_lock_irqsave(&ctx->ohci->lock, flags);
1682 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1683 ctx->ohci->generation == packet->generation) {
1684 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1687 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
1694 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1698 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
1700 packet->callback(packet, &ctx->ohci->card, packet->ack);
1704 static void detect_dead_context(struct fw_ohci *ohci,
1709 ctl = reg_read(ohci, CONTROL_SET(regs));
1711 ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
1715 static void handle_dead_contexts(struct fw_ohci *ohci)
1720 detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
1721 detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
1722 detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
1723 detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
1725 if (!(ohci->it_context_support & (1 << i)))
1728 detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
1731 if (!(ohci->ir_context_support & (1 << i)))
1734 detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
1765 static u32 get_cycle_time(struct fw_ohci *ohci)
1772 if (has_reboot_by_cycle_timer_read_quirk(ohci))
1775 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1777 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1780 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1784 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1804 static u32 update_bus_time(struct fw_ohci *ohci)
1806 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1808 if (unlikely(!ohci->bus_time_running)) {
1809 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
1810 ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
1812 ohci->bus_time_running = true;
1815 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1816 ohci->bus_time += 0x40;
1818 return ohci->bus_time | cycle_time_seconds;
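
update_bus_time() widens the cycle timer's 7-bit seconds field (cycle time >> 25) into a 32-bit bus time: whenever bit 6 of the stored value disagrees with bit 6 of the hardware seconds, 0x40 is added, which carries into the upper bits on every other flip. A standalone sketch of that extension, assuming (as the driver arranges via the cycle64Seconds interrupt) that it runs at least once every 64 seconds; names are illustrative.

#include <stdint.h>

/* Upper bits of the extended seconds counter; the low 6 bits stay zero. */
static uint32_t bus_time;

static uint32_t extend_seconds(uint32_t hw_seconds /* 7-bit, 0..127 */)
{
	if ((bus_time & 0x40) != (hw_seconds & 0x40))
		bus_time += 0x40;          /* flips bit 6; carries upward every other time */

	return bus_time | hw_seconds;      /* full 32-bit seconds value */
}
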
1821 static int get_status_for_port(struct fw_ohci *ohci, int port_index)
1825 mutex_lock(&ohci->phy_reg_mutex);
1826 reg = write_phy_reg(ohci, 7, port_index);
1828 reg = read_phy_reg(ohci, 8);
1829 mutex_unlock(&ohci->phy_reg_mutex);
1842 static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1849 entry = ohci->self_id_buffer[i];
1858 static int initiated_reset(struct fw_ohci *ohci)
1863 mutex_lock(&ohci->phy_reg_mutex);
1864 reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
1866 reg = read_phy_reg(ohci, 8);
1868 reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
1870 reg = read_phy_reg(ohci, 12); /* read register 12 */
1879 mutex_unlock(&ohci->phy_reg_mutex);
1888 static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1894 reg = reg_read(ohci, OHCI1394_NodeID);
1896 ohci_notice(ohci,
1902 reg = ohci_read_phy_reg(&ohci->card, 4);
1907 reg = ohci_read_phy_reg(&ohci->card, 1);
1913 status = get_status_for_port(ohci, i);
1919 self_id |= initiated_reset(ohci);
1921 pos = get_self_id_pos(ohci, self_id, self_id_count);
1923 memmove(&(ohci->self_id_buffer[pos+1]),
1924 &(ohci->self_id_buffer[pos]),
1925 (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1926 ohci->self_id_buffer[pos] = self_id;
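
find_and_insert_self_id() splices the locally generated self-ID into the sorted self_id_buffer by shifting the tail up with memmove() before storing at the chosen position. The insertion pattern in isolation (illustrative helper; the caller must provide room for one more entry):

#include <stdint.h>
#include <string.h>

static void insert_at(uint32_t *buf, int count, int pos, uint32_t value)
{
	/* Shift entries [pos, count) up by one, then drop the new value in. */
	memmove(&buf[pos + 1], &buf[pos], (size_t)(count - pos) * sizeof(*buf));
	buf[pos] = value;
}
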
1934 struct fw_ohci *ohci =
1942 reg = reg_read(ohci, OHCI1394_NodeID);
1944 ohci_notice(ohci,
1949 ohci_notice(ohci, "malconfigured bus\n");
1952 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1956 if (!(ohci->is_root && is_new_root))
1957 reg_write(ohci, OHCI1394_LinkControlSet,
1959 ohci->is_root = is_new_root;
1961 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1963 ohci_notice(ohci, "self ID receive error\n");
1975 ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
1979 generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
1983 u32 id = cond_le32_to_cpu(ohci->self_id[i]);
1984 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
1995 ohci_notice(ohci, "ignoring spurious self IDs\n");
2000 ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
2004 ohci->self_id_buffer[j] = id;
2007 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2008 self_id_count = find_and_insert_self_id(ohci, self_id_count);
2010 ohci_notice(ohci,
2017 ohci_notice(ohci, "no self IDs\n");
2036 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
2038 ohci_notice(ohci, "new bus reset, discarding self ids\n");
2043 spin_lock_irq(&ohci->lock);
2045 ohci->generation = -1; /* prevent AT packet queueing */
2046 context_stop(&ohci->at_request_ctx);
2047 context_stop(&ohci->at_response_ctx);
2049 spin_unlock_irq(&ohci->lock);
2056 at_context_flush(&ohci->at_request_ctx);
2057 at_context_flush(&ohci->at_response_ctx);
2059 spin_lock_irq(&ohci->lock);
2061 ohci->generation = generation;
2062 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2064 if (ohci->quirks & QUIRK_RESET_PACKET)
2065 ohci->request_generation = generation;
2076 if (ohci->next_config_rom != NULL) {
2077 if (ohci->next_config_rom != ohci->config_rom) {
2078 free_rom = ohci->config_rom;
2079 free_rom_bus = ohci->config_rom_bus;
2081 ohci->config_rom = ohci->next_config_rom;
2082 ohci->config_rom_bus = ohci->next_config_rom_bus;
2083 ohci->next_config_rom = NULL;
2091 reg_write(ohci, OHCI1394_BusOptions,
2092 be32_to_cpu(ohci->config_rom[2]));
2093 ohci->config_rom[0] = ohci->next_header;
2094 reg_write(ohci, OHCI1394_ConfigROMhdr,
2095 be32_to_cpu(ohci->next_header));
2099 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
2100 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
2103 spin_unlock_irq(&ohci->lock);
2106 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
2108 log_selfids(ohci, generation, self_id_count);
2110 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
2111 self_id_count, ohci->self_id_buffer,
2112 ohci->csr_state_setclear_abdicate);
2113 ohci->csr_state_setclear_abdicate = false;
2118 struct fw_ohci *ohci = data;
2122 event = reg_read(ohci, OHCI1394_IntEventClear);
2131 reg_write(ohci, OHCI1394_IntEventClear,
2133 log_irqs(ohci, event);
2136 queue_work(selfid_workqueue, &ohci->bus_reset_work);
2139 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2142 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2145 tasklet_schedule(&ohci->at_request_ctx.tasklet);
2148 tasklet_schedule(&ohci->at_response_ctx.tasklet);
2151 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2152 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2157 &ohci->ir_context_list[i].context.tasklet);
2163 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2164 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2169 &ohci->it_context_list[i].context.tasklet);
2175 ohci_err(ohci, "register access failure\n");
2178 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2179 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2180 reg_write(ohci, OHCI1394_IntEventClear,
2183 ohci_err(ohci, "PCI posted write error\n");
2188 ohci_notice(ohci, "isochronous cycle too long\n");
2189 reg_write(ohci, OHCI1394_LinkControlSet,
2201 ohci_notice(ohci, "isochronous cycle inconsistent\n");
2205 handle_dead_contexts(ohci);
2208 spin_lock(&ohci->lock);
2209 update_bus_time(ohci);
2210 spin_unlock(&ohci->lock);
2212 flush_writes(ohci);
2217 static int software_reset(struct fw_ohci *ohci)
2222 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2224 val = reg_read(ohci, OHCI1394_HCControlSet);
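
software_reset() sets OHCI1394_HCControl_softReset and then reads HCControlSet back until the controller clears the bit again, bounded by a retry loop in the driver. A self-contained sketch of the write-then-poll idiom, with a simulated register standing in for real hardware; the poll budget and all names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define SOFT_RESET 0x00010000u

/* Simulated control register; a real driver would use MMIO accessors here. */
static uint32_t hc_control;

static void ctrl_write(uint32_t bits) { hc_control |= bits; }

static uint32_t ctrl_read(void)
{
	static int reads;
	if (++reads > 3)                    /* pretend the chip finishes after a few polls */
		hc_control &= ~SOFT_RESET;
	return hc_control;
}

static int soft_reset(void)
{
	ctrl_write(SOFT_RESET);
	for (int i = 0; i < 500; i++) {     /* bounded poll loop */
		if (!(ctrl_read() & SOFT_RESET))
			return 0;           /* reset completed */
	}
	return -1;                          /* timed out */
}

int main(void)
{
	printf("soft_reset: %d\n", soft_reset());
	return 0;
}
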
2246 static int configure_1394a_enhancements(struct fw_ohci *ohci)
2252 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2258 ret = read_phy_reg(ohci, 2);
2262 ret = read_paged_phy_reg(ohci, 1, 8);
2269 if (ohci->quirks & QUIRK_NO_1394A)
2280 ret = update_phy_reg(ohci, 5, clear, set);
2288 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2291 reg_write(ohci, OHCI1394_HCControlClear,
2297 static int probe_tsb41ba3d(struct fw_ohci *ohci)
2303 reg = read_phy_reg(ohci, 2);
2310 reg = read_paged_phy_reg(ohci, 1, i + 10);
2322 struct fw_ohci *ohci = fw_ohci(card);
2326 ret = software_reset(ohci);
2328 ohci_err(ohci, "failed to reset ohci card\n");
2345 reg_write(ohci, OHCI1394_HCControlSet,
2348 flush_writes(ohci);
2352 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2357 ohci_err(ohci, "failed to set Link Power Status\n");
2361 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2362 ret = probe_tsb41ba3d(ohci);
2366 ohci_notice(ohci, "local TSB41BA3D phy\n");
2368 ohci->quirks &= ~QUIRK_TI_SLLZ059;
2371 reg_write(ohci, OHCI1394_HCControlClear,
2374 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2375 reg_write(ohci, OHCI1394_LinkControlSet,
2379 reg_write(ohci, OHCI1394_ATRetries,
2385 ohci->bus_time_running = false;
2388 if (ohci->ir_context_support & (1 << i))
2389 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2392 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2394 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2400 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2401 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2402 reg_write(ohci, OHCI1394_FairnessControl, 0);
2403 card->priority_budget_implemented = ohci->pri_req_max != 0;
2405 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
2406 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2407 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2409 ret = configure_1394a_enhancements(ohci);
2438 ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2439 &ohci->next_config_rom_bus, GFP_KERNEL);
2440 if (ohci->next_config_rom == NULL)
2443 copy_config_rom(ohci->next_config_rom, config_rom, length);
2449 ohci->next_config_rom = ohci->config_rom;
2450 ohci->next_config_rom_bus = ohci->config_rom_bus;
2453 ohci->next_header = ohci->next_config_rom[0];
2454 ohci->next_config_rom[0] = 0;
2455 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2456 reg_write(ohci, OHCI1394_BusOptions,
2457 be32_to_cpu(ohci->next_config_rom[2]));
2458 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2460 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2474 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2476 reg_write(ohci, OHCI1394_HCControlSet,
2480 reg_write(ohci, OHCI1394_LinkControlSet,
2484 ar_context_run(&ohci->ar_request_ctx);
2485 ar_context_run(&ohci->ar_response_ctx);
2487 flush_writes(ohci);
2490 fw_schedule_bus_reset(&ohci->card, false, true);
2498 struct fw_ohci *ohci;
2502 ohci = fw_ohci(card);
2527 * We use ohci->lock to avoid racing with the code that sets
2528 * ohci->next_config_rom to NULL (see bus_reset_work).
2531 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2536 spin_lock_irq(&ohci->lock);
2540 * push our new allocation into the ohci->next_config_rom
2549 if (ohci->next_config_rom == NULL) {
2550 ohci->next_config_rom = next_config_rom;
2551 ohci->next_config_rom_bus = next_config_rom_bus;
2555 copy_config_rom(ohci->next_config_rom, config_rom, length);
2557 ohci->next_header = config_rom[0];
2558 ohci->next_config_rom[0] = 0;
2560 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2562 spin_unlock_irq(&ohci->lock);
2566 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
2578 fw_schedule_bus_reset(&ohci->card, true, true);
2585 struct fw_ohci *ohci = fw_ohci(card);
2587 at_context_transmit(&ohci->at_request_ctx, packet);
2592 struct fw_ohci *ohci = fw_ohci(card);
2594 at_context_transmit(&ohci->at_response_ctx, packet);
2599 struct fw_ohci *ohci = fw_ohci(card);
2600 struct context *ctx = &ohci->at_request_ctx;
2610 dma_unmap_single(ohci->card.device, packet->payload_bus,
2613 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
2618 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
2620 packet->callback(packet, &ohci->card, packet->ack);
2631 struct fw_ohci *ohci = fw_ohci(card);
2643 spin_lock_irqsave(&ohci->lock, flags);
2645 if (ohci->generation != generation) {
2657 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2659 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
2661 flush_writes(ohci);
2663 spin_unlock_irqrestore(&ohci->lock, flags);
2670 struct fw_ohci *ohci = fw_ohci(card);
2677 if (ohci->is_root &&
2678 (reg_read(ohci, OHCI1394_LinkControlSet) &
2683 if (ohci->csr_state_setclear_abdicate)
2689 return reg_read(ohci, OHCI1394_NodeID) << 16;
2692 return get_cycle_time(ohci);
2700 spin_lock_irqsave(&ohci->lock, flags);
2701 value = update_bus_time(ohci);
2702 spin_unlock_irqrestore(&ohci->lock, flags);
2706 value = reg_read(ohci, OHCI1394_ATRetries);
2710 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2711 (ohci->pri_req_max << 8);
2721 struct fw_ohci *ohci = fw_ohci(card);
2726 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2727 reg_write(ohci, OHCI1394_LinkControlClear,
2729 flush_writes(ohci);
2732 ohci->csr_state_setclear_abdicate = false;
2736 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2737 reg_write(ohci, OHCI1394_LinkControlSet,
2739 flush_writes(ohci);
2742 ohci->csr_state_setclear_abdicate = true;
2746 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2747 flush_writes(ohci);
2751 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2752 reg_write(ohci, OHCI1394_IntEventSet,
2754 flush_writes(ohci);
2758 spin_lock_irqsave(&ohci->lock, flags);
2759 ohci->bus_time = (update_bus_time(ohci) & 0x40) |
2761 spin_unlock_irqrestore(&ohci->lock, flags);
2767 reg_write(ohci, OHCI1394_ATRetries, value);
2768 flush_writes(ohci);
2772 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2773 flush_writes(ohci);
2836 dma_sync_single_range_for_cpu(context->ohci->card.device,
2875 dma_sync_single_range_for_cpu(context->ohci->card.device,
2892 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2930 dma_sync_single_range_for_cpu(context->ohci->card.device,
2977 static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2981 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2982 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2983 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2984 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2985 ohci->mc_channels = channels;
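
set_multichannel_mask() splits the 64-bit channel mask into 32-bit halves and programs them through set/clear register pairs: the complement goes to the clear register, the mask itself to the set register, so exactly the requested channels end up enabled. A sketch of that mask arithmetic over an illustrative register model:

#include <stdint.h>

/* Model of a mask register with separate "set" and "clear" write ports. */
struct mask_reg {
	uint32_t value;
};

static void mask_clear(struct mask_reg *m, uint32_t bits) { m->value &= ~bits; }
static void mask_set(struct mask_reg *m, uint32_t bits)   { m->value |= bits; }

static void program_channels(struct mask_reg *hi_reg, struct mask_reg *lo_reg,
			     uint64_t channels)
{
	uint32_t hi = (uint32_t)(channels >> 32);
	uint32_t lo = (uint32_t)channels;

	mask_clear(hi_reg, ~hi);   /* disable everything not in the new mask */
	mask_clear(lo_reg, ~lo);
	mask_set(hi_reg, hi);      /* enable exactly the requested channels */
	mask_set(lo_reg, lo);
}
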
2991 struct fw_ohci *ohci = fw_ohci(card);
2998 spin_lock_irq(&ohci->lock);
3002 mask = &ohci->it_context_mask;
3008 ctx = &ohci->it_context_list[index];
3013 channels = &ohci->ir_context_channels;
3014 mask = &ohci->ir_context_mask;
3021 ctx = &ohci->ir_context_list[index];
3026 mask = &ohci->ir_context_mask;
3028 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
3030 ohci->mc_allocated = true;
3033 ctx = &ohci->ir_context_list[index];
3042 spin_unlock_irq(&ohci->lock);
3054 ret = context_init(&ctx->context, ohci, regs, callback);
3059 set_multichannel_mask(ohci, 0);
3068 spin_lock_irq(&ohci->lock);
3076 ohci->mc_allocated = false;
3081 spin_unlock_irq(&ohci->lock);
3090 struct fw_ohci *ohci = ctx->context.ohci;
3100 index = ctx - ohci->it_context_list;
3106 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
3107 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
3115 index = ctx - ohci->ir_context_list;
3122 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3123 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3124 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
3138 struct fw_ohci *ohci = fw_ohci(base->card);
3144 index = ctx - ohci->it_context_list;
3145 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
3150 index = ctx - ohci->ir_context_list;
3151 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
3154 flush_writes(ohci);
3163 struct fw_ohci *ohci = fw_ohci(base->card);
3172 spin_lock_irqsave(&ohci->lock, flags);
3176 index = ctx - ohci->it_context_list;
3177 ohci->it_context_mask |= 1 << index;
3181 index = ctx - ohci->ir_context_list;
3182 ohci->ir_context_mask |= 1 << index;
3183 ohci->ir_context_channels |= 1ULL << base->channel;
3187 index = ctx - ohci->ir_context_list;
3188 ohci->ir_context_mask |= 1 << index;
3189 ohci->ir_context_channels |= ohci->mc_channels;
3190 ohci->mc_channels = 0;
3191 ohci->mc_allocated = false;
3195 spin_unlock_irqrestore(&ohci->lock, flags);
3200 struct fw_ohci *ohci = fw_ohci(base->card);
3207 spin_lock_irqsave(&ohci->lock, flags);
3210 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
3211 *channels = ohci->ir_context_channels;
3214 set_multichannel_mask(ohci, *channels);
3218 spin_unlock_irqrestore(&ohci->lock, flags);
3229 static void ohci_resume_iso_dma(struct fw_ohci *ohci)
3234 for (i = 0 ; i < ohci->n_ir ; i++) {
3235 ctx = &ohci->ir_context_list[i];
3240 for (i = 0 ; i < ohci->n_it ; i++) {
3241 ctx = &ohci->it_context_list[i];
3329 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3357 struct device *device = ctx->context.ohci->card.device;
3475 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3498 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
3510 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
3520 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3610 struct fw_ohci *ohci = pci_get_drvdata(pdev);
3614 ar_context_release(&ohci->ar_response_ctx);
3615 ar_context_release(&ohci->ar_request_ctx);
3617 dev_notice(dev, "removed fw-ohci device\n");
3623 struct fw_ohci *ohci;
3634 ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
3635 if (ohci == NULL)
3637 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3638 pci_set_drvdata(dev, ohci);
3640 devres_add(&dev->dev, ohci);
3651 spin_lock_init(&ohci->lock);
3652 mutex_init(&ohci->phy_reg_mutex);
3654 INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
3658 ohci_err(ohci, "invalid MMIO resource\n");
3664 ohci_err(ohci, "request and map MMIO resource unavailable\n");
3667 ohci->registers = pcim_iomap_table(dev)[0];
3675 ohci->quirks = ohci_quirks[i].flags;
3679 ohci->quirks = param_quirks;
3682 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
3691 ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
3693 if (!ohci->misc_buffer)
3696 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
3701 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
3706 err = context_init(&ohci->at_request_ctx, ohci,
3711 err = context_init(&ohci->at_response_ctx, ohci,
3716 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
3717 ohci->ir_context_channels = ~0ULL;
3718 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
3719 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
3720 ohci->ir_context_mask = ohci->ir_context_support;
3721 ohci->n_ir = hweight32(ohci->ir_context_mask);
3722 size = sizeof(struct iso_context) * ohci->n_ir;
3723 ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
3724 if (!ohci->ir_context_list)
3727 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
3728 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
3730 if (!ohci->it_context_support) {
3731 ohci_notice(ohci, "overriding IsoXmitIntMask\n");
3732 ohci->it_context_support = 0xf;
3734 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
3735 ohci->it_context_mask = ohci->it_context_support;
3736 ohci->n_it = hweight32(ohci->it_context_mask);
3737 size = sizeof(struct iso_context) * ohci->n_it;
3738 ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
3739 if (!ohci->it_context_list)
3742 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
3743 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
3745 bus_options = reg_read(ohci, OHCI1394_BusOptions);
3748 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3749 reg_read(ohci, OHCI1394_GUIDLo);
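
The 64-bit GUID is assembled here from the GUIDHi/GUIDLo register pair and written back as two 32-bit halves in the resume path near the end of the listing. The packing and splitting in isolation, with illustrative helper names:

#include <stdint.h>

static uint64_t pack_guid(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

static void split_guid(uint64_t guid, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(guid >> 32);
	*lo = (uint32_t)guid;
}
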
3751 if (!(ohci->quirks & QUIRK_NO_MSI))
3754 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
3756 ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
3760 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
3764 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3765 ohci_notice(ohci,
3768 version >> 16, version & 0xff, ohci->card.index,
3769 ohci->n_ir, ohci->n_it, ohci->quirks,
3770 reg_read(ohci, OHCI1394_PhyUpperBound) ?
3783 struct fw_ohci *ohci = pci_get_drvdata(dev);
3789 if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
3790 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3791 flush_writes(ohci);
3793 cancel_work_sync(&ohci->bus_reset_work);
3794 fw_core_remove_card(&ohci->card);
3801 software_reset(ohci);
3805 dev_notice(&dev->dev, "removing fw-ohci device\n");
3811 struct fw_ohci *ohci = pci_get_drvdata(dev);
3814 software_reset(ohci);
3817 ohci_err(ohci, "pci_save_state failed\n");
3822 ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
3830 struct fw_ohci *ohci = pci_get_drvdata(dev);
3838 ohci_err(ohci, "pci_enable_device failed\n");
3843 if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3844 !reg_read(ohci, OHCI1394_GUIDHi)) {
3845 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3846 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
3849 err = ohci_enable(&ohci->card, NULL, 0);
3853 ohci_resume_iso_dma(ohci);