Lines matching refs: dev (identifier cross-reference of the r592 MemoryStick host driver; the leading number on each entry is its line number in the driver source)

53 static inline u32 r592_read_reg(struct r592_device *dev, int address)
55 u32 value = readl(dev->mmio + address);
61 static inline void r592_write_reg(struct r592_device *dev,
65 writel(value, dev->mmio + address);
69 static inline u32 r592_read_reg_raw_be(struct r592_device *dev, int address)
71 u32 value = __raw_readl(dev->mmio + address);
77 static inline void r592_write_reg_raw_be(struct r592_device *dev,
81 __raw_writel(cpu_to_be32(value), dev->mmio + address);
85 static inline void r592_set_reg_mask(struct r592_device *dev,
88 u32 reg = readl(dev->mmio + address);
90 writel(reg | mask, dev->mmio + address);
94 static inline void r592_clear_reg_mask(struct r592_device *dev,
97 u32 reg = readl(dev->mmio + address);
100 writel(reg & ~mask, dev->mmio + address);
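The six fragments above are the driver's register-access layer: plain readl()/writel() wrappers, raw big-endian variants for FIFO data (the __raw_* accessors skip the usual byte swapping, so cpu_to_be32() is applied explicitly on the write side at line 81, and presumably be32_to_cpu() on the read side), and read-modify-write mask helpers on top. A minimal sketch of the mask helpers, consistent with the fragments at lines 85-100:

    /* Read-modify-write helpers as suggested by lines 85-100.
     * Assumes dev->mmio is the ioremap()ed BAR 0 set up in probe
     * (line 759); any debug tracing in the full driver is omitted. */
    #include <linux/io.h>

    static inline void r592_set_reg_mask(struct r592_device *dev,
                                         int address, u32 mask)
    {
            u32 reg = readl(dev->mmio + address);
            writel(reg | mask, dev->mmio + address);   /* set bits   */
    }

    static inline void r592_clear_reg_mask(struct r592_device *dev,
                                           int address, u32 mask)
    {
            u32 reg = readl(dev->mmio + address);
            writel(reg & ~mask, dev->mmio + address);  /* clear bits */
    }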
105 static int r592_wait_status(struct r592_device *dev, u32 mask, u32 wanted_mask)
108 u32 reg = r592_read_reg(dev, R592_STATUS);
115 reg = r592_read_reg(dev, R592_STATUS);
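r592_wait_status (lines 105-115) polls R592_STATUS until the bits selected by mask equal wanted_mask. Only the two register reads survive in the listing; a plausible reconstruction of the loop around them, with the timeout an assumption:

    /* Polling loop implied by lines 105-115. The one second budget
     * and cpu_relax() are assumptions; the reads are as listed. */
    static int r592_wait_status(struct r592_device *dev,
                                u32 mask, u32 wanted_mask)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(1000);
            u32 reg = r592_read_reg(dev, R592_STATUS);

            while ((reg & mask) != wanted_mask) {
                    if (time_after(jiffies, timeout))
                            return -ETIME;
                    cpu_relax();
                    reg = r592_read_reg(dev, R592_STATUS);
            }
            return 0;
    }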
130 static int r592_enable_device(struct r592_device *dev, bool enable)
137 r592_write_reg(dev, R592_POWER, R592_POWER_0 | R592_POWER_1);
140 r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
145 r592_write_reg(dev, R592_POWER, 0);
151 static int r592_set_mode(struct r592_device *dev, bool parallel_mode)
157 r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL);
159 r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20);
165 r592_set_reg_mask(dev, R592_POWER, R592_POWER_20);
167 r592_clear_reg_mask(dev, R592_IO,
171 r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL);
174 dev->parallel_mode = parallel_mode;
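r592_set_mode (lines 151-174) switches the card interface between serial and parallel: the serial branch programs R592_IO_MODE_SERIAL and drops the R592_POWER_20 bit, the parallel branch sets that bit and programs R592_IO_MODE_PARALLEL. A condensed sketch:

    /* Condensed from lines 151-174. The r592_clear_reg_mask() call at
     * line 167 is cut off mid-argument in the listing, so it is left
     * out here; everything else follows the fragments. */
    static int r592_set_mode(struct r592_device *dev, bool parallel_mode)
    {
            if (!parallel_mode) {
                    r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_SERIAL);
                    r592_clear_reg_mask(dev, R592_POWER, R592_POWER_20);
            } else {
                    r592_set_reg_mask(dev, R592_POWER, R592_POWER_20);
                    r592_write_reg(dev, R592_IO_MODE, R592_IO_MODE_PARALLEL);
            }
            dev->parallel_mode = parallel_mode;
            return 0;
    }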
179 static void r592_host_reset(struct r592_device *dev)
181 r592_set_reg_mask(dev, R592_IO, R592_IO_RESET);
183 r592_set_mode(dev, dev->parallel_mode);
188 static void r592_clear_interrupts(struct r592_device *dev)
191 r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_ACK_MASK);
192 r592_clear_reg_mask(dev, R592_REG_MSC, IRQ_ALL_EN_MASK);
197 static int r592_test_io_error(struct r592_device *dev)
199 if (!(r592_read_reg(dev, R592_STATUS) &
207 static int r592_test_fifo_empty(struct r592_device *dev)
209 if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
213 r592_host_reset(dev);
215 if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
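r592_test_fifo_empty (lines 207-215) uses a check/reset/recheck pattern: if the FIFO-empty flag is already set we are done; otherwise reset the host once (line 213) and test again before giving up. A sketch, with the error code assumed:

    /* Check-reset-recheck per lines 207-215; -EIO is an assumption,
     * the register tests are as listed. */
    static int r592_test_fifo_empty(struct r592_device *dev)
    {
            if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
                    return 0;                /* already drained */

            r592_host_reset(dev);            /* one recovery attempt */

            if (r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_FIFO_EMPTY)
                    return 0;
            return -EIO;                     /* FIFO stuck non-empty */
    }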
223 static void r592_start_dma(struct r592_device *dev, bool is_write)
227 spin_lock_irqsave(&dev->irq_lock, flags);
230 r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
231 r592_set_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
234 r592_write_reg(dev, R592_FIFO_DMA, sg_dma_address(&dev->req->sg));
237 reg = r592_read_reg(dev, R592_FIFO_DMA_SETTINGS);
244 r592_write_reg(dev, R592_FIFO_DMA_SETTINGS, reg);
246 spin_unlock_irqrestore(&dev->irq_lock, flags);
250 static void r592_stop_dma(struct r592_device *dev, int error)
252 r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS,
256 r592_write_reg(dev, R592_FIFO_DMA,
257 dev->dummy_dma_page_physical_address);
259 r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
260 r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
261 dev->dma_error = error;
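r592_start_dma (lines 223-246) runs under dev->irq_lock: it acks and enables the DMA interrupts, writes the mapped scatterlist address into R592_FIFO_DMA, and updates R592_FIFO_DMA_SETTINGS. r592_stop_dma (lines 250-261) undoes that and, notably, re-points the engine at a pre-allocated dummy page so a straggling transfer cannot write into memory the driver no longer owns. Sketch of the stop side:

    /* r592_stop_dma per lines 250-261. The mask cleared from
     * R592_FIFO_DMA_SETTINGS at line 252 is truncated in the listing;
     * R592_FIFO_DMA_SETTINGS_EN is an assumed name for it. */
    static void r592_stop_dma(struct r592_device *dev, int error)
    {
            r592_clear_reg_mask(dev, R592_FIFO_DMA_SETTINGS,
                                R592_FIFO_DMA_SETTINGS_EN);

            /* park the engine on a safe scratch page */
            r592_write_reg(dev, R592_FIFO_DMA,
                           dev->dummy_dma_page_physical_address);

            r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_EN_MASK);
            r592_clear_reg_mask(dev, R592_REG_MSC, DMA_IRQ_ACK_MASK);
            dev->dma_error = error;
    }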
265 static void r592_check_dma(struct r592_device *dev)
267 dev->dma_capable = r592_enable_dma &&
268 (r592_read_reg(dev, R592_FIFO_DMA_SETTINGS) &
273 static int r592_transfer_fifo_dma(struct r592_device *dev)
278 if (!dev->dma_capable || !dev->req->long_data)
281 len = dev->req->sg.length;
282 is_write = dev->req->data_dir == WRITE;
289 dev->dma_error = 0;
290 reinit_completion(&dev->dma_done);
293 sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
296 if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
301 r592_start_dma(dev, is_write);
305 &dev->dma_done, msecs_to_jiffies(1000))) {
307 r592_stop_dma(dev, -ETIMEDOUT);
310 dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
314 return dev->dma_error;
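r592_transfer_fifo_dma (lines 273-314) bails out early (so the caller can fall back to PIO) unless the device is DMA-capable and the request carries scatterlist data, requires the mapping to produce exactly one segment of at least R592_LFIFO_SIZE (line 296), then starts the engine and sleeps on dev->dma_done. A sketch of the wait-and-unmap tail:

    /* Tail of the DMA path, lines 301-314. Judging by the arguments at
     * line 305, the blocking call is wait_for_completion_timeout()
     * with a one second budget. */
    r592_start_dma(dev, is_write);

    if (!wait_for_completion_timeout(&dev->dma_done,
                                     msecs_to_jiffies(1000))) {
            /* the IRQ never completed us: shut the engine down */
            r592_stop_dma(dev, -ETIMEDOUT);
    }

    dma_unmap_sg(&dev->pci_dev->dev, &dev->req->sg, 1,
                 is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

    return dev->dma_error;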
324 static void r592_write_fifo_pio(struct r592_device *dev,
328 if (!kfifo_is_empty(&dev->pio_fifo)) {
331 int copy_len = kfifo_in(&dev->pio_fifo, buffer, len);
333 if (!kfifo_is_full(&dev->pio_fifo))
338 copy_len = kfifo_out(&dev->pio_fifo, tmp, 4);
340 r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)tmp);
343 WARN_ON(!kfifo_is_empty(&dev->pio_fifo));
347 r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
354 kfifo_in(&dev->pio_fifo, buffer, len);
358 static void r592_flush_fifo_write(struct r592_device *dev)
363 if (kfifo_is_empty(&dev->pio_fifo))
366 len = kfifo_out(&dev->pio_fifo, buffer, 4);
367 r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
375 static void r592_read_fifo_pio(struct r592_device *dev,
381 if (!kfifo_is_empty(&dev->pio_fifo)) {
383 kfifo_out(&dev->pio_fifo, buffer, min(4, len));
387 if (!kfifo_is_empty(&dev->pio_fifo))
393 *(u32 *)buffer = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
399 *(u32 *)tmp = r592_read_reg_raw_be(dev, R592_FIFO_PIO);
400 kfifo_in(&dev->pio_fifo, tmp, 4);
401 len -= kfifo_out(&dev->pio_fifo, buffer, len);
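The PIO helpers (lines 324-401) move all FIFO data through 32-bit big-endian register accesses, using the small dev->pio_fifo kfifo to stage sub-dword remainders so buffers of any length can be transferred. The staging idea behind the write side, not the full function:

    /* Core of r592_write_fifo_pio (lines 324-354), simplified: whole
     * dwords go straight to the register, the 1-3 byte tail is parked
     * in dev->pio_fifo until more data arrives or
     * r592_flush_fifo_write() (lines 358-367) drains it. Handling of
     * a partially filled kfifo on entry is ignored here. */
    while (len >= 4) {
            r592_write_reg_raw_be(dev, R592_FIFO_PIO, *(u32 *)buffer);
            buffer += 4;
            len -= 4;
    }
    kfifo_in(&dev->pio_fifo, buffer, len);   /* stash the tail */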
409 static int r592_transfer_fifo_pio(struct r592_device *dev)
413 bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
416 kfifo_reset(&dev->pio_fifo);
418 if (!dev->req->long_data) {
420 r592_write_fifo_pio(dev, dev->req->data,
421 dev->req->data_len);
422 r592_flush_fifo_write(dev);
424 r592_read_fifo_pio(dev, dev->req->data,
425 dev->req->data_len);
430 sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
436 r592_write_fifo_pio(dev, miter.addr, miter.length);
438 r592_read_fifo_pio(dev, miter.addr, miter.length);
443 r592_flush_fifo_write(dev);
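r592_transfer_fifo_pio (lines 409-443) resets the staging kfifo, copies short requests directly from dev->req->data, and walks long (scatterlist) requests with an sg_mapping_iter in atomic mode, flushing the write tail at the end. Sketch of the scatterlist walk:

    /* Scatterlist walk per lines 430-443. The SG_MITER flags match the
     * sg_miter_start() fragment; sg_miter_next()/sg_miter_stop() are
     * implied by the API rather than visible in the listing. */
    struct sg_mapping_iter miter;

    sg_miter_start(&miter, &dev->req->sg, 1, SG_MITER_ATOMIC |
                   (is_write ? SG_MITER_FROM_SG : SG_MITER_TO_SG));

    while (sg_miter_next(&miter)) {
            if (is_write)
                    r592_write_fifo_pio(dev, miter.addr, miter.length);
            else
                    r592_read_fifo_pio(dev, miter.addr, miter.length);
    }
    sg_miter_stop(&miter);

    if (is_write)
            r592_flush_fifo_write(dev);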
451 static void r592_execute_tpc(struct r592_device *dev)
457 if (!dev->req) {
462 is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
463 len = dev->req->long_data ?
464 dev->req->sg.length : dev->req->data_len;
473 if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT)) {
480 memstick_debug_get_tpc_name(dev->req->tpc), len);
484 r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
486 r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
489 error = r592_test_fifo_empty(dev);
495 error = r592_transfer_fifo_dma(dev);
497 error = r592_transfer_fifo_pio(dev);
505 (dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT) |
508 r592_write_reg(dev, R592_TPC_EXEC, reg);
512 if (dev->req->need_card_int)
515 error = r592_wait_status(dev, status, status);
522 error = r592_test_io_error(dev);
530 error = r592_transfer_fifo_dma(dev);
532 error = r592_transfer_fifo_pio(dev);
537 if (dev->parallel_mode && dev->req->need_card_int) {
539 dev->req->int_reg = 0;
540 status = r592_read_reg(dev, R592_STATUS);
543 dev->req->int_reg |= MEMSTICK_INT_CMDNAK;
545 dev->req->int_reg |= MEMSTICK_INT_BREQ;
547 dev->req->int_reg |= MEMSTICK_INT_ERR;
549 dev->req->int_reg |= MEMSTICK_INT_CED;
555 dev->req->error = error;
556 r592_clear_reg_mask(dev, R592_REG_MSC, R592_REG_MSC_LED);
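r592_execute_tpc (lines 451-556) is the command engine: check card presence, set the FIFO direction bit, pre-load the FIFO for writes (DMA first, PIO when DMA is not possible), fire the TPC via R592_TPC_EXEC, wait for status, drain the FIFO for reads, and in parallel mode map R592_STATUS bits onto MEMSTICK_INT_* flags (lines 537-549). A condensed control-flow sketch:

    /* Condensed from lines 451-556; locking, debug output and most
     * error handling are dropped. The extra R592_TPC_EXEC fields or'ed
     * in around line 505 are cut off in the listing and omitted; the
     * -EINVAL convention for DMA-not-possible is an assumption
     * consistent with the paired transfer calls at 495/497 and
     * 530/532. */
    if (!(r592_read_reg(dev, R592_REG_MSC) & R592_REG_MSC_PRSNT))
            return;                          /* no card inserted */

    if (is_write)
            r592_set_reg_mask(dev, R592_IO, R592_IO_DIRECTION);
    else
            r592_clear_reg_mask(dev, R592_IO, R592_IO_DIRECTION);

    error = r592_test_fifo_empty(dev);
    if (error)
            goto out;                        /* 'out' records the error in
                                                dev->req->error, line 555 */

    if (is_write) {                          /* pre-load FIFO */
            error = r592_transfer_fifo_dma(dev);
            if (error == -EINVAL)
                    error = r592_transfer_fifo_pio(dev);
    }

    r592_write_reg(dev, R592_TPC_EXEC,
                   dev->req->tpc << R592_TPC_EXEC_TPC_SHIFT);

    /* 'status' is assembled earlier and widened at line 512 when the
     * request needs a card interrupt */
    error = r592_wait_status(dev, status, status);
    if (!error && !is_write) {               /* drain FIFO for reads */
            error = r592_transfer_fifo_dma(dev);
            if (error == -EINVAL)
                    error = r592_transfer_fifo_pio(dev);
    }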
564 struct r592_device *dev = (struct r592_device *)data;
568 spin_lock_irqsave(&dev->io_thread_lock, flags);
570 error = memstick_next_req(dev->host, &dev->req);
571 spin_unlock_irqrestore(&dev->io_thread_lock, flags);
587 r592_execute_tpc(dev);
595 static void r592_update_card_detect(struct r592_device *dev)
597 u32 reg = r592_read_reg(dev, R592_REG_MSC);
610 r592_write_reg(dev, R592_REG_MSC, reg);
616 struct r592_device *dev = from_timer(dev, t, detect_timer);
617 r592_update_card_detect(dev);
618 memstick_detect_change(dev->host);
624 struct r592_device *dev = (struct r592_device *)data;
631 spin_lock_irqsave(&dev->irq_lock, flags);
633 reg = r592_read_reg(dev, R592_REG_MSC);
639 r592_write_reg(dev, R592_REG_MSC, reg);
653 mod_timer(&dev->detect_timer,
669 r592_stop_dma(dev, error);
670 complete(&dev->dma_done);
673 spin_unlock_irqrestore(&dev->irq_lock, flags);
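The interrupt handler (lines 624-673) runs under dev->irq_lock: it reads R592_REG_MSC and writes the value straight back to ack what it saw (lines 633/639), arms dev->detect_timer to debounce card insertion and removal (line 653), and on DMA interrupts stops the engine and completes dev->dma_done so the sleeping transfer path wakes up. Sketch of the DMA leg:

    /* DMA-completion leg of r592_irq, per lines 624-673. The bit test
     * against DMA_IRQ_ACK_MASK and the error decoding are assumptions;
     * only the 'dev' lines survive in the listing. */
    spin_lock_irqsave(&dev->irq_lock, flags);

    reg = r592_read_reg(dev, R592_REG_MSC);
    r592_write_reg(dev, R592_REG_MSC, reg);       /* ack pending bits */

    if (reg & DMA_IRQ_ACK_MASK) {
            int error = 0;                        /* or a code decoded
                                                     from reg */
            r592_stop_dma(dev, error);
            complete(&dev->dma_done);             /* wake the DMA waiter */
    }

    spin_unlock_irqrestore(&dev->irq_lock, flags);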
681 struct r592_device *dev = memstick_priv(host);
687 return r592_enable_device(dev, true);
689 return r592_enable_device(dev, false);
696 return r592_set_mode(dev, 0);
698 return r592_set_mode(dev, 1);
710 struct r592_device *dev = memstick_priv(host);
713 if (dev->req)
716 spin_lock_irqsave(&dev->io_thread_lock, flags);
717 if (wake_up_process(dev->io_thread))
719 spin_unlock_irqrestore(&dev->io_thread_lock, flags);
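Lines 681-719 are the memstick host callbacks: set_param maps MEMSTICK_POWER on/off to r592_enable_device() and the serial/parallel interface choice to r592_set_mode(); the request callback refuses overlapping requests and simply wakes the io thread, which loops on memstick_next_req() (line 570) and hands each request to r592_execute_tpc() (line 587). Sketch of the request side:

    /* Request callback per lines 710-719. The function name and return
     * values are assumptions consistent with the memstick host API;
     * the body lines match the fragments. */
    static int r592_submit_req(struct memstick_host *host)
    {
            struct r592_device *dev = memstick_priv(host);
            unsigned long flags;

            if (dev->req)
                    return -EBUSY;           /* previous TPC still active */

            spin_lock_irqsave(&dev->io_thread_lock, flags);
            wake_up_process(dev->io_thread); /* the io thread pulls the
                                                request itself */
            spin_unlock_irqrestore(&dev->io_thread_lock, flags);
            return 0;
    }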
733 struct r592_device *dev;
736 host = memstick_alloc_host(sizeof(struct r592_device), &pdev->dev);
740 dev = memstick_priv(host);
741 dev->host = host;
742 dev->pci_dev = pdev;
743 pci_set_drvdata(pdev, dev);
751 error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
759 dev->mmio = pci_ioremap_bar(pdev, 0);
760 if (!dev->mmio) {
765 dev->irq = pdev->irq;
766 spin_lock_init(&dev->irq_lock);
767 spin_lock_init(&dev->io_thread_lock);
768 init_completion(&dev->dma_done);
769 INIT_KFIFO(dev->pio_fifo);
770 timer_setup(&dev->detect_timer, r592_detect_timer, 0);
776 r592_check_dma(dev);
778 dev->io_thread = kthread_run(r592_process_thread, dev, "r592_io");
779 if (IS_ERR(dev->io_thread)) {
780 error = PTR_ERR(dev->io_thread);
785 dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
786 &dev->dummy_dma_page_physical_address, GFP_KERNEL);
787 r592_stop_dma(dev, 0);
789 error = request_irq(dev->irq, &r592_irq, IRQF_SHARED,
790 DRV_NAME, dev);
794 r592_update_card_detect(dev);
802 free_irq(dev->irq, dev);
804 if (dev->dummy_dma_page)
805 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
806 dev->dummy_dma_page_physical_address);
808 kthread_stop(dev->io_thread);
810 iounmap(dev->mmio);
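The probe path (lines 733-810) builds everything the IRQ handler and io thread rely on before either can run: allocate the host with the r592_device as private data, set a 32-bit DMA mask, map BAR 0, initialise the locks, completion, kfifo and detect timer, probe DMA capability, start the io thread, allocate the dummy DMA page and park the engine on it (lines 785-787), and only then request the shared IRQ. Note that the fragments show no error check on dma_alloc_coherent(); both the probe and remove error paths guard the free with 'if (dev->dummy_dma_page)' instead (lines 804 and 838). A sketch of the tail and its unwinding, with label names assumed:

    /* Probe tail per lines 778-810; the goto labels are assumptions,
     * and the unwind order (free dummy page, stop thread, unmap)
     * follows lines 804-810. Host registration with the memstick core
     * is elided here. */
    error = request_irq(dev->irq, &r592_irq, IRQF_SHARED, DRV_NAME, dev);
    if (error)
            goto error_free_dummy_page;

    r592_update_card_detect(dev);
    return 0;                                /* probe succeeded */

    error_free_dummy_page:
            if (dev->dummy_dma_page)
                    dma_free_coherent(&pdev->dev, PAGE_SIZE,
                                      dev->dummy_dma_page,
                                      dev->dummy_dma_page_physical_address);
            kthread_stop(dev->io_thread);
            iounmap(dev->mmio);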
824 struct r592_device *dev = pci_get_drvdata(pdev);
828 kthread_stop(dev->io_thread);
829 del_timer_sync(&dev->detect_timer);
830 r592_enable_device(dev, false);
832 while (!error && dev->req) {
833 dev->req->error = -ETIME;
834 error = memstick_next_req(dev->host, &dev->req);
836 memstick_remove_host(dev->host);
838 if (dev->dummy_dma_page)
839 dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
840 dev->dummy_dma_page_physical_address);
842 free_irq(dev->irq, dev);
843 iounmap(dev->mmio);
846 memstick_free_host(dev->host);
852 struct r592_device *dev = dev_get_drvdata(core_dev);
854 r592_clear_interrupts(dev);
855 memstick_suspend_host(dev->host);
856 del_timer_sync(&dev->detect_timer);
862 struct r592_device *dev = dev_get_drvdata(core_dev);
864 r592_clear_interrupts(dev);
865 r592_enable_device(dev, false);
866 memstick_resume_host(dev->host);
867 r592_update_card_detect(dev);
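The power-management hooks close the file (lines 852-867): suspend clears and disables interrupts, suspends the memstick host and kills the detect timer; resume clears interrupts again, forces the device off so the memstick core re-powers it into a known state, resumes the host and rescans card presence. Sketch of the resume side:

    /* Resume hook per lines 862-867. The dev_pm_ops-style signature is
     * an assumption; the body follows the fragments. */
    static int r592_resume(struct device *core_dev)
    {
            struct r592_device *dev = dev_get_drvdata(core_dev);

            r592_clear_interrupts(dev);
            r592_enable_device(dev, false);  /* known-off; core re-enables */
            memstick_resume_host(dev->host);
            r592_update_card_detect(dev);
            return 0;
    }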