// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote VUB300 SDIO/SDmem Host Controller Driver
 *
 * Copyright (C) 2010 Elan Digital Systems Limited
 *
 * based on USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot
 *         Any SDIO/SDmem/MMC device plugged into the VUB300 will appear,
 *         by virtue of this driver, to have been plugged into a local
 *         SDIO host controller, similar to, say, a PCI Ricoh controller
 *         This is because this kernel device driver is both a USB 2.0
 *         client device driver AND an MMC host controller driver. Thus
 *         if there is an existing driver for the inserted SDIO/SDmem/MMC
 *         device then that driver will be used by the kernel to manage
 *         the device in exactly the same fashion as if it had been
 *         directly plugged into, say, a local pci bus Ricoh controller
 *
 * RANT: this driver was written using a display 128x48 - converting it
 *       to a line width of 80 makes it very difficult to support. In
 *       particular functions have been broken down into sub functions
 *       and the original meaningful names have been shortened into
 *       cryptic ones.
 *       The problem is that executing a fragment of code subject to
 *       two conditions means an indentation of 24, thus leaving only
 *       56 characters for a C statement. And that is quite ridiculous!
 *
 * Data types: data passed to/from the VUB300 is fixed to a number of
 *             bits and driver data fields reflect that limit by using
 *             u8, u16, u32
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/scatterlist.h>

struct host_controller_info {
	u8 info_size;
	u16 firmware_version;
	u8 number_of_ports;
} __packed;

#define FIRMWARE_BLOCK_BOUNDARY 1024
struct sd_command_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type; /* Bit7 - Rd/Wr */
	u8 command_index;
	u8 transfer_size[4]; /* ReadSize + ReadSize */
	u8 response_type;
	u8 arguments[4];
	u8 block_count[2];
	u8 block_size[2];
	u8 block_boundary[2];
	u8 reserved[44]; /* to pad out to 64 bytes */
} __packed;

struct sd_irqpoll_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type; /* Bit7 - Rd/Wr */
	u8 padding[16]; /* don't ask why !! */
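	/*
	 * 16-bit interrupt-poll timeout, most significant byte first;
	 * both bytes are filled in by send_irqpoll() below
	 */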
	u8 poll_timeout_msb;
	u8 poll_timeout_lsb;
	u8 reserved[42]; /* to pad out to 64 bytes */
} __packed;

struct sd_common_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
} __packed;

struct sd_response_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type;
	u8 command_index;
	u8 command_response[];
} __packed;

struct sd_status_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u16 port_flags;
	u32 sdio_clock;
	u16 host_header_size;
	u16 func_header_size;
	u16 ctrl_header_size;
} __packed;

struct sd_error_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 error_code;
} __packed;

struct sd_interrupt_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
} __packed;

struct offload_registers_access {
	u8 command_byte[4];
	u8 Respond_Byte[4];
} __packed;

#define INTERRUPT_REGISTER_ACCESSES 15
struct sd_offloaded_interrupt {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES];
} __packed;

struct sd_register_header {
	u8 header_size;
	u8 header_type;
	u8 port_number;
	u8 command_type;
	u8 command_index;
	u8 command_response[6];
} __packed;

#define PIGGYBACK_REGISTER_ACCESSES 14
struct sd_offloaded_piggyback {
	struct sd_register_header sdio;
	struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES];
} __packed;

union sd_response {
	struct sd_common_header common;
	struct sd_status_header status;
	struct sd_error_header error;
	struct sd_interrupt_header interrupt;
	struct sd_response_header response;
	struct sd_offloaded_interrupt irq;
	struct sd_offloaded_piggyback pig;
} __packed;

union sd_command {
	struct sd_command_header head;
	struct sd_irqpoll_header poll;
} __packed;

enum SD_RESPONSE_TYPE {
	SDRT_UNSPECIFIED = 0,
	SDRT_NONE,
	SDRT_1,
	SDRT_1B,
	SDRT_2,
	SDRT_3,
	SDRT_4,
	SDRT_5,
	SDRT_5B,
	SDRT_6,
	SDRT_7,
};

#define RESPONSE_INTERRUPT 0x01
#define RESPONSE_ERROR 0x02
#define RESPONSE_STATUS 0x03
#define RESPONSE_IRQ_DISABLED 0x05
#define RESPONSE_IRQ_ENABLED 0x06
#define RESPONSE_PIGGYBACKED 0x07
#define RESPONSE_NO_INTERRUPT 0x08
#define RESPONSE_PIG_DISABLED 0x09
#define RESPONSE_PIG_ENABLED 0x0A
#define SD_ERROR_1BIT_TIMEOUT 0x01
#define SD_ERROR_4BIT_TIMEOUT 0x02
#define SD_ERROR_1BIT_CRC_WRONG 0x03
#define SD_ERROR_4BIT_CRC_WRONG 0x04
#define SD_ERROR_1BIT_CRC_ERROR 0x05
#define SD_ERROR_4BIT_CRC_ERROR 0x06
#define SD_ERROR_NO_CMD_ENDBIT 0x07
#define SD_ERROR_NO_1BIT_DATEND 0x08
#define SD_ERROR_NO_4BIT_DATEND 0x09
#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A
#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B
#define SD_ERROR_ILLEGAL_COMMAND 0x0C
#define SD_ERROR_NO_DEVICE 0x0D
#define SD_ERROR_TRANSFER_LENGTH 0x0E
#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F
#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10
#define SD_ERROR_ILLEGAL_STATE 0x11
#define SD_ERROR_UNKNOWN_ERROR 0x12
#define SD_ERROR_RESERVED_ERROR 0x13
#define SD_ERROR_INVALID_FUNCTION 0x14
#define SD_ERROR_OUT_OF_RANGE 0x15
#define SD_ERROR_STAT_CMD 0x16
#define SD_ERROR_STAT_DATA 0x17
#define SD_ERROR_STAT_CMD_TIMEOUT 0x18
#define SD_ERROR_SDCRDY_STUCK 0x19
#define SD_ERROR_UNHANDLED 0x1A
#define SD_ERROR_OVERRUN 0x1B
#define SD_ERROR_PIO_TIMEOUT 0x1C

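/*
 * FUN() and REG() pull the SDIO function number and the register
 * address out of a CMD52/CMD53 argument
 */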
#define FUN(c) (0x000007 & (c->arg>>28))
#define REG(c) (0x01FFFF & (c->arg>>9))

static bool limit_speed_to_24_MHz;
module_param(limit_speed_to_24_MHz, bool, 0644);
MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");

static bool pad_input_to_usb_pkt;
module_param(pad_input_to_usb_pkt, bool, 0644);
MODULE_PARM_DESC(pad_input_to_usb_pkt,
		 "Pad USB data input transfers to whole USB Packet");

static bool disable_offload_processing;
module_param(disable_offload_processing, bool, 0644);
MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");

static bool force_1_bit_data_xfers;
module_param(force_1_bit_data_xfers, bool, 0644);
MODULE_PARM_DESC(force_1_bit_data_xfers,
		 "Force SDIO Data Transfers to 1-bit Mode");

static bool force_polling_for_irqs;
module_param(force_polling_for_irqs, bool, 0644);
MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");

static int firmware_irqpoll_timeout = 1024;
module_param(firmware_irqpoll_timeout, int, 0644);
MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout");

static int force_max_req_size = 128;
module_param(force_max_req_size, int, 0644);
MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes");

#ifdef SMSC_DEVELOPMENT_BOARD
static int firmware_rom_wait_states = 0x04;
#else
static int firmware_rom_wait_states = 0x1C;
#endif

module_param(firmware_rom_wait_states, int, 0644);
MODULE_PARM_DESC(firmware_rom_wait_states,
		 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");

#define ELAN_VENDOR_ID 0x2201
#define VUB300_VENDOR_ID 0x0424
#define VUB300_PRODUCT_ID 0x012C
static const struct usb_device_id vub300_table[] = {
	{USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)},
	{USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)},
	{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, vub300_table);

static struct workqueue_struct *cmndworkqueue;
static struct workqueue_struct *pollworkqueue;
static struct workqueue_struct *deadworkqueue;

static inline int interface_to_InterfaceNumber(struct usb_interface *interface)
{
	if (!interface)
		return -1;
	if (!interface->cur_altsetting)
		return -1;
	return interface->cur_altsetting->desc.bInterfaceNumber;
}

struct sdio_register {
	unsigned func_num:3;
	unsigned sdio_reg:17;
	unsigned activate:1;
	unsigned prepared:1;
	unsigned regvalue:8;
	unsigned response:8;
	unsigned sparebit:26;
};

struct vub300_mmc_host {
	struct usb_device *udev;
	struct usb_interface *interface;
	struct kref kref;
	struct mutex cmd_mutex;
	struct mutex irq_mutex;
	char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */
	u8 cmnd_out_ep; /* EndPoint for commands */
	u8 cmnd_res_ep; /* EndPoint for responses */
	u8 data_out_ep; /* EndPoint for out data */
	u8 data_inp_ep; /* EndPoint for inp data */
	bool card_powered;
	bool card_present;
	bool read_only;
	bool large_usb_packets;
	bool app_spec; /* ApplicationSpecific */
	bool irq_enabled; /* by the MMC CORE */
	bool irq_disabled; /* in the firmware */
	unsigned bus_width:4;
	u8 total_offload_count;
	u8 dynamic_register_count;
	u8 resp_len;
	u32 datasize;
	int errors;
	int usb_transport_fail;
	int usb_timed_out;
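	/* SDIO interrupts received while the MMC core had them disabled */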
	int irqs_queued;
	struct sdio_register sdio_register[16];
	struct offload_interrupt_function_register {
#define MAXREGBITS 4
#define MAXREGS (1<<MAXREGBITS)
#define MAXREGMASK (MAXREGS-1)
		u8 offload_count;
		u32 offload_point;
		struct offload_registers_access reg[MAXREGS];
	} fn[8];
	u16 fbs[8]; /* Function Block Size */
	struct mmc_command *cmd;
	struct mmc_request *req;
	struct mmc_data *data;
	struct mmc_host *mmc;
	struct urb *urb;
	struct urb *command_out_urb;
	struct urb *command_res_urb;
	struct completion command_complete;
	struct completion irqpoll_complete;
	union sd_command cmnd;
	union sd_response resp;
	struct timer_list sg_transfer_timer;
	struct usb_sg_request sg_request;
	struct timer_list inactivity_timer;
	struct work_struct deadwork;
	struct work_struct cmndwork;
	struct delayed_work pollwork;
	struct host_controller_info hc_info;
	struct sd_status_header system_port_status;
	u8 padded_buffer[64];
};

#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref)
#define SET_TRANSFER_PSEUDOCODE 21
#define SET_INTERRUPT_PSEUDOCODE 20
#define SET_FAILURE_MODE 18
#define SET_ROM_WAIT_STATES 16
#define SET_IRQ_ENABLE 13
#define SET_CLOCK_SPEED 11
#define SET_FUNCTION_BLOCK_SIZE 9
#define SET_SD_DATA_MODE 6
#define SET_SD_POWER 4
#define ENTER_DFU_MODE 3
#define GET_HC_INF0 1
#define GET_SYSTEM_PORT_STATUS 0

static void vub300_delete(struct kref *kref)
{				/* kref callback - softirq */
	struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref);
	struct mmc_host *mmc = vub300->mmc;
	usb_free_urb(vub300->command_out_urb);
	vub300->command_out_urb = NULL;
	usb_free_urb(vub300->command_res_urb);
	vub300->command_res_urb = NULL;
	usb_put_dev(vub300->udev);
	mmc_free_host(mmc);
	/*
	 * and hence also frees vub300
	 * which is contained at the end of struct mmc
	 */
}

static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300)
{
	kref_get(&vub300->kref);
	if (queue_work(cmndworkqueue, &vub300->cmndwork)) {
		/*
		 * then the cmndworkqueue was not previously
		 * running and the above get ref is obviously
		 * required and will be put when the thread
		 * terminates by a specific call
		 */
	} else {
		/*
		 * the cmndworkqueue was already running from
		 * a previous invocation and thus to keep the
		 * kref counts correct we must undo the get
		 */
		kref_put(&vub300->kref, vub300_delete);
	}
}

static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay)
{
	kref_get(&vub300->kref);
	if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) {
		/*
		 * then the pollworkqueue was not previously
		 * running and the above get ref is obviously
		 * required and will be put when the thread
		 * terminates by a specific call
		 */
	} else {
		/*
		 * the pollworkqueue was already running from
		 * a previous invocation and thus to keep the
		 * kref counts correct we must undo the get
		 */
		kref_put(&vub300->kref, vub300_delete);
	}
}

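/*
 * as with the cmnd and poll variants above, the reference taken here is
 * dropped by the work item itself, or immediately below if the work was
 * already queued
 */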
static void vub300_queue_dead_work(struct vub300_mmc_host *vub300)
{
	kref_get(&vub300->kref);
	if (queue_work(deadworkqueue, &vub300->deadwork)) {
		/*
		 * then the deadworkqueue was not previously
		 * running and the above get ref is obviously
		 * required and will be put when the thread
		 * terminates by a specific call
		 */
	} else {
		/*
		 * the deadworkqueue was already running from
		 * a previous invocation and thus to keep the
		 * kref counts correct we must undo the get
		 */
		kref_put(&vub300->kref, vub300_delete);
	}
}

static void irqpoll_res_completed(struct urb *urb)
{				/* urb completion handler - hardirq */
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
	if (urb->status)
		vub300->usb_transport_fail = urb->status;
	complete(&vub300->irqpoll_complete);
}

static void irqpoll_out_completed(struct urb *urb)
{				/* urb completion handler - hardirq */
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
	if (urb->status) {
		vub300->usb_transport_fail = urb->status;
		complete(&vub300->irqpoll_complete);
		return;
	} else {
		int ret;
		unsigned int pipe =
			usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
		usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
				  &vub300->resp, sizeof(vub300->resp),
				  irqpoll_res_completed, vub300);
		vub300->command_res_urb->actual_length = 0;
		ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
		if (ret) {
			vub300->usb_transport_fail = ret;
			complete(&vub300->irqpoll_complete);
		}
		return;
	}
}

static void send_irqpoll(struct vub300_mmc_host *vub300)
{
	/* cmd_mutex is held by vub300_pollwork_thread */
	int retval;
	int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout);
	vub300->cmnd.poll.header_size = 22;
	vub300->cmnd.poll.header_type = 1;
	vub300->cmnd.poll.port_number = 0;
	vub300->cmnd.poll.command_type = 2;
	vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout;
	vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8);
	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep)
			  , &vub300->cmnd, sizeof(vub300->cmnd)
			  , irqpoll_out_completed, vub300);
	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
	if (0 > retval) {
		vub300->usb_transport_fail = retval;
		vub300_queue_poll_work(vub300, 1);
		complete(&vub300->irqpoll_complete);
		return;
	} else {
		return;
	}
}

static void new_system_port_status(struct vub300_mmc_host *vub300)
{
	int old_card_present = vub300->card_present;
	int new_card_present =
		(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0;
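	/* bit 4 of port_flags is the write-protect switch */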
	vub300->read_only =
		(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
	if (new_card_present && !old_card_present) {
		dev_info(&vub300->udev->dev, "card just inserted\n");
		vub300->card_present = 1;
		vub300->bus_width = 0;
		if (disable_offload_processing)
			strncpy(vub300->vub_name, "EMPTY Processing Disabled",
				sizeof(vub300->vub_name));
		else
			vub300->vub_name[0] = 0;
		mmc_detect_change(vub300->mmc, 1);
	} else if (!new_card_present && old_card_present) {
		dev_info(&vub300->udev->dev, "card just ejected\n");
		vub300->card_present = 0;
		mmc_detect_change(vub300->mmc, 0);
	} else {
		/* no change */
	}
}

static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300,
					struct offload_registers_access
					*register_access, u8 func)
{
	u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count;
	memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access,
	       sizeof(struct offload_registers_access));
	vub300->fn[func].offload_count += 1;
	vub300->total_offload_count += 1;
}

static void add_offloaded_reg(struct vub300_mmc_host *vub300,
			      struct offload_registers_access *register_access)
{
	u32 Register = ((0x03 & register_access->command_byte[0]) << 15)
		       | ((0xFF & register_access->command_byte[1]) << 7)
		       | ((0xFE & register_access->command_byte[2]) >> 1);
	u8 func = ((0x70 & register_access->command_byte[0]) >> 4);
	u8 regs = vub300->dynamic_register_count;
	u8 i = 0;
	while (0 < regs-- && 1 == vub300->sdio_register[i].activate) {
		if (vub300->sdio_register[i].func_num == func &&
		    vub300->sdio_register[i].sdio_reg == Register) {
			if (vub300->sdio_register[i].prepared == 0)
				vub300->sdio_register[i].prepared = 1;
			vub300->sdio_register[i].response =
				register_access->Respond_Byte[2];
			vub300->sdio_register[i].regvalue =
				register_access->Respond_Byte[3];
			return;
		} else {
			i += 1;
			continue;
		}
	}
	__add_offloaded_reg_to_fifo(vub300, register_access, func);
}

static void check_vub300_port_status(struct vub300_mmc_host *vub300)
{
	/*
	 * cmd_mutex is held by vub300_pollwork_thread,
	 * vub300_deadwork_thread or vub300_cmndwork_thread
	 */
	int retval;
	retval =
		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0),
				GET_SYSTEM_PORT_STATUS,
				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				0x0000, 0x0000, &vub300->system_port_status,
				sizeof(vub300->system_port_status), 1000);
	if (sizeof(vub300->system_port_status) == retval)
		new_system_port_status(vub300);
}

static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300)
{
	/* cmd_mutex is held by vub300_pollwork_thread */
	if (vub300->command_res_urb->actual_length == 0)
		return;

	switch (vub300->resp.common.header_type) {
	case RESPONSE_INTERRUPT:
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irq_enabled)
			mmc_signal_sdio_irq(vub300->mmc);
		else
			vub300->irqs_queued += 1;
		vub300->irq_disabled = 1;
		mutex_unlock(&vub300->irq_mutex);
		break;
	case RESPONSE_ERROR:
		if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE)
			check_vub300_port_status(vub300);
		break;
	case RESPONSE_STATUS:
		vub300->system_port_status = vub300->resp.status;
		new_system_port_status(vub300);
		if (!vub300->card_present)
			vub300_queue_poll_work(vub300, HZ / 5);
		break;
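	/*
	 * each offloaded register access is 8 bytes on the wire
	 * (4 command bytes plus 4 response bytes), hence the shift by 3
	 */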
	case RESPONSE_IRQ_DISABLED:
	{
		int offloaded_data_length = vub300->resp.common.header_size - 3;
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irq_enabled)
			mmc_signal_sdio_irq(vub300->mmc);
		else
			vub300->irqs_queued += 1;
		vub300->irq_disabled = 1;
		mutex_unlock(&vub300->irq_mutex);
		break;
	}
	case RESPONSE_IRQ_ENABLED:
	{
		int offloaded_data_length = vub300->resp.common.header_size - 3;
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irq_enabled)
			mmc_signal_sdio_irq(vub300->mmc);
		else
			vub300->irqs_queued += 1;
		vub300->irq_disabled = 0;
		mutex_unlock(&vub300->irq_mutex);
		break;
	}
	case RESPONSE_NO_INTERRUPT:
		vub300_queue_poll_work(vub300, 1);
		break;
	default:
		break;
	}
}

static void __do_poll(struct vub300_mmc_host *vub300)
{
	/* cmd_mutex is held by vub300_pollwork_thread */
	unsigned long commretval;
	mod_timer(&vub300->inactivity_timer, jiffies + HZ);
	init_completion(&vub300->irqpoll_complete);
	send_irqpoll(vub300);
	commretval = wait_for_completion_timeout(&vub300->irqpoll_complete,
						 msecs_to_jiffies(500));
	if (vub300->usb_transport_fail) {
		/* no need to do anything */
	} else if (commretval == 0) {
		vub300->usb_timed_out = 1;
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
	} else { /* commretval > 0 */
		__vub300_irqpoll_response(vub300);
	}
}

/* this thread runs only when the driver
 * is trying to poll the device for an IRQ
 */
static void vub300_pollwork_thread(struct work_struct *work)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 = container_of(work,
				struct vub300_mmc_host, pollwork.work);
	if (!vub300->interface) {
		kref_put(&vub300->kref, vub300_delete);
		return;
	}
	mutex_lock(&vub300->cmd_mutex);
	if (vub300->cmd) {
		vub300_queue_poll_work(vub300, 1);
	} else if (!vub300->card_present) {
		/* no need to do anything */
	} else { /* vub300->card_present */
		mutex_lock(&vub300->irq_mutex);
		if (!vub300->irq_enabled) {
			mutex_unlock(&vub300->irq_mutex);
		} else if (vub300->irqs_queued) {
			vub300->irqs_queued -= 1;
			mmc_signal_sdio_irq(vub300->mmc);
			mod_timer(&vub300->inactivity_timer, jiffies + HZ);
			mutex_unlock(&vub300->irq_mutex);
		} else { /* NOT vub300->irqs_queued */
			mutex_unlock(&vub300->irq_mutex);
			__do_poll(vub300);
		}
	}
	mutex_unlock(&vub300->cmd_mutex);
	kref_put(&vub300->kref, vub300_delete);
}

static void vub300_deadwork_thread(struct work_struct *work)
{				/* NOT irq */
	struct vub300_mmc_host *vub300 =
		container_of(work, struct vub300_mmc_host, deadwork);
	if (!vub300->interface) {
		kref_put(&vub300->kref, vub300_delete);
		return;
	}
	mutex_lock(&vub300->cmd_mutex);
	if (vub300->cmd) {
		/*
		 * a command got in as the inactivity
		 * timer expired - so we just let the
		 * processing of the command show if
		 * the device is dead
		 */
	} else if (vub300->card_present) {
		check_vub300_port_status(vub300);
	} else if (vub300->mmc && vub300->mmc->card) {
		/*
		 * the MMC core must not have responded
		 * to the previous indication - let's
		 * hope that it eventually does so we
		 * will just ignore this for now
		 */
	} else {
		check_vub300_port_status(vub300);
	}
	mod_timer(&vub300->inactivity_timer, jiffies + HZ);
	mutex_unlock(&vub300->cmd_mutex);
	kref_put(&vub300->kref, vub300_delete);
}

static void vub300_inactivity_timer_expired(struct timer_list *t)
{				/* softirq */
	struct vub300_mmc_host *vub300 = from_timer(vub300, t,
						    inactivity_timer);
	if (!vub300->interface) {
		kref_put(&vub300->kref, vub300_delete);
	} else if (vub300->cmd) {
		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
	} else {
		vub300_queue_dead_work(vub300);
		mod_timer(&vub300->inactivity_timer, jiffies + HZ);
	}
}

static int vub300_response_error(u8 error_code)
{
	switch (error_code) {
	case SD_ERROR_PIO_TIMEOUT:
	case SD_ERROR_1BIT_TIMEOUT:
	case SD_ERROR_4BIT_TIMEOUT:
		return -ETIMEDOUT;
	case SD_ERROR_STAT_DATA:
	case SD_ERROR_OVERRUN:
	case SD_ERROR_STAT_CMD:
	case SD_ERROR_STAT_CMD_TIMEOUT:
	case SD_ERROR_SDCRDY_STUCK:
	case SD_ERROR_UNHANDLED:
	case SD_ERROR_1BIT_CRC_WRONG:
	case SD_ERROR_4BIT_CRC_WRONG:
	case SD_ERROR_1BIT_CRC_ERROR:
	case SD_ERROR_4BIT_CRC_ERROR:
	case SD_ERROR_NO_CMD_ENDBIT:
	case SD_ERROR_NO_1BIT_DATEND:
	case SD_ERROR_NO_4BIT_DATEND:
	case SD_ERROR_1BIT_DATA_TIMEOUT:
	case SD_ERROR_4BIT_DATA_TIMEOUT:
	case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT:
	case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT:
		return -EILSEQ;
	case 33:
		return -EILSEQ;
	case SD_ERROR_ILLEGAL_COMMAND:
		return -EINVAL;
	case SD_ERROR_NO_DEVICE:
		return -ENOMEDIUM;
	default:
		return -ENODEV;
	}
}

static void command_res_completed(struct urb *urb)
{				/* urb completion handler - hardirq */
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
	if (urb->status) {
		/* we have to let the initiator handle the error */
	} else if (vub300->command_res_urb->actual_length == 0) {
		/*
		 * we have seen this happen once or twice and
		 * we suspect a buggy USB host controller
		 */
	} else if (!vub300->data) {
		/* this means that the command (typically CMD52) succeeded */
	} else if (vub300->resp.common.header_type != 0x02) {
		/*
		 * this is an error response from the VUB300 chip
		 * and we let the initiator handle it
		 */
	} else if (vub300->urb) {
		vub300->cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		usb_unlink_urb(vub300->urb);
	} else {
		vub300->cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		usb_sg_cancel(&vub300->sg_request);
	}
	complete(&vub300->command_complete); /* got_response_in */
}

static void command_out_completed(struct urb *urb)
{				/* urb completion handler - hardirq */
	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context;
	if (urb->status) {
		complete(&vub300->command_complete);
	} else {
		int ret;
		unsigned int pipe =
			usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep);
		usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe,
				  &vub300->resp, sizeof(vub300->resp),
				  command_res_completed, vub300);
		vub300->command_res_urb->actual_length = 0;
		ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC);
		if (ret == 0) {
			/*
			 * the urb completion handler will call
			 * our completion handler
			 */
		} else {
			/*
			 * and thus we only call it directly
			 * when it will not be called
			 */
			complete(&vub300->command_complete);
		}
	}
}

/*
 * the STUFF bits are masked out for the comparisons
 */
static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300,
					   u32 cmd_arg)
{
	if ((0xFBFFFE00 & cmd_arg) == 0x80022200)
		vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80022000)
		vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80042200)
		vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80042000)
		vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80062200)
		vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80062000)
		vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80082200)
		vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x80082000)
		vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200)
		vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000)
		vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200)
		vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000)
		vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200)
		vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]);
	else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000)
		vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]);
	else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00)
		vub300->bus_width = 1;
	else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02)
		vub300->bus_width = 4;
}

static void send_command(struct vub300_mmc_host *vub300)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	struct mmc_command *cmd = vub300->cmd;
	struct mmc_data *data = vub300->data;
	int retval;
	int i;
	u8 response_type;
	if (vub300->app_spec) {
		switch (cmd->opcode) {
		case 6:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			if (0x00000000 == (0x00000003 & cmd->arg))
				vub300->bus_width = 1;
			else if (0x00000002 == (0x00000003 & cmd->arg))
				vub300->bus_width = 4;
			else
				dev_err(&vub300->udev->dev,
					"unexpected ACMD6 bus_width=%d\n",
					0x00000003 & cmd->arg);
			break;
		case 13:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 22:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 23:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 41:
			response_type = SDRT_3;
			vub300->resp_len = 6;
			break;
		case 42:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 51:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 55:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		default:
			vub300->resp_len = 0;
			cmd->error = -EINVAL;
			complete(&vub300->command_complete);
			return;
		}
		vub300->app_spec = 0;
	} else {
		switch (cmd->opcode) {
		case 0:
			response_type = SDRT_NONE;
			vub300->resp_len = 0;
			break;
		case 1:
			response_type = SDRT_3;
			vub300->resp_len = 6;
			break;
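		/* R2 responses (CID/CSD) are 136 bits, stored here as 17 bytes */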
		case 2:
			response_type = SDRT_2;
			vub300->resp_len = 17;
			break;
		case 3:
			response_type = SDRT_6;
			vub300->resp_len = 6;
			break;
		case 4:
			response_type = SDRT_NONE;
			vub300->resp_len = 0;
			break;
		case 5:
			response_type = SDRT_4;
			vub300->resp_len = 6;
			break;
		case 6:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 7:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 8:
			response_type = SDRT_7;
			vub300->resp_len = 6;
			break;
		case 9:
			response_type = SDRT_2;
			vub300->resp_len = 17;
			break;
		case 10:
			response_type = SDRT_2;
			vub300->resp_len = 17;
			break;
		case 12:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 13:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 15:
			response_type = SDRT_NONE;
			vub300->resp_len = 0;
			break;
		case 16:
			for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++)
				vub300->fbs[i] = 0xFFFF & cmd->arg;
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 17:
		case 18:
		case 24:
		case 25:
		case 27:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 28:
		case 29:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 30:
		case 32:
		case 33:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 38:
			response_type = SDRT_1B;
			vub300->resp_len = 6;
			break;
		case 42:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		case 52:
			response_type = SDRT_5;
			vub300->resp_len = 6;
			snoop_block_size_and_bus_width(vub300, cmd->arg);
			break;
		case 53:
			response_type = SDRT_5;
			vub300->resp_len = 6;
			break;
		case 55:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			vub300->app_spec = 1;
			break;
		case 56:
			response_type = SDRT_1;
			vub300->resp_len = 6;
			break;
		default:
			vub300->resp_len = 0;
			cmd->error = -EINVAL;
			complete(&vub300->command_complete);
			return;
		}
	}
	/*
	 * it is a shame that we can not use "sizeof(struct sd_command_header)"
	 * this is because the packet _must_ be padded to 64 bytes
	 */
	vub300->cmnd.head.header_size = 20;
	vub300->cmnd.head.header_type = 0x00;
	vub300->cmnd.head.port_number = 0; /* "0" means port 1 */
	vub300->cmnd.head.command_type = 0x00; /* standard read command */
	vub300->cmnd.head.response_type = response_type;
	vub300->cmnd.head.command_index = cmd->opcode;
	vub300->cmnd.head.arguments[0] = cmd->arg >> 24;
	vub300->cmnd.head.arguments[1] = cmd->arg >> 16;
	vub300->cmnd.head.arguments[2] = cmd->arg >> 8;
	vub300->cmnd.head.arguments[3] = cmd->arg >> 0;
	if (cmd->opcode == 52) {
		int fn = 0x7 & (cmd->arg >> 28);
		vub300->cmnd.head.block_count[0] = 0;
		vub300->cmnd.head.block_count[1] = 0;
		vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF;
		vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF;
		vub300->cmnd.head.command_type = 0x00;
		vub300->cmnd.head.transfer_size[0] = 0;
		vub300->cmnd.head.transfer_size[1] = 0;
		vub300->cmnd.head.transfer_size[2] = 0;
		vub300->cmnd.head.transfer_size[3] = 0;
	} else if (!data) {
		vub300->cmnd.head.block_count[0] = 0;
		vub300->cmnd.head.block_count[1] = 0;
		vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF;
		vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF;
		vub300->cmnd.head.command_type = 0x00;
		vub300->cmnd.head.transfer_size[0] = 0;
		vub300->cmnd.head.transfer_size[1] = 0;
		vub300->cmnd.head.transfer_size[2] = 0;
		vub300->cmnd.head.transfer_size[3] = 0;
	} else if (cmd->opcode == 53) {
		int fn = 0x7 & (cmd->arg >> 28);
		if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */
			vub300->cmnd.head.block_count[0] =
				(data->blocks >> 8) & 0xFF;
			vub300->cmnd.head.block_count[1] =
				(data->blocks >> 0) & 0xFF;
			vub300->cmnd.head.block_size[0] =
				(data->blksz >> 8) & 0xFF;
			vub300->cmnd.head.block_size[1] =
				(data->blksz >> 0) & 0xFF;
		} else { /* BYTE MODE */
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
			vub300->cmnd.head.block_size[0] =
				(vub300->datasize >> 8) & 0xFF;
			vub300->cmnd.head.block_size[1] =
				(vub300->datasize >> 0) & 0xFF;
		}
		vub300->cmnd.head.command_type =
			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
		vub300->cmnd.head.transfer_size[0] =
			(vub300->datasize >> 24) & 0xFF;
		vub300->cmnd.head.transfer_size[1] =
			(vub300->datasize >> 16) & 0xFF;
		vub300->cmnd.head.transfer_size[2] =
			(vub300->datasize >> 8) & 0xFF;
		vub300->cmnd.head.transfer_size[3] =
			(vub300->datasize >> 0) & 0xFF;
		if (vub300->datasize < vub300->fbs[fn]) {
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
		}
	} else {
		vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF;
		vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF;
		vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF;
		vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF;
		vub300->cmnd.head.command_type =
			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80;
		vub300->cmnd.head.transfer_size[0] =
			(vub300->datasize >> 24) & 0xFF;
		vub300->cmnd.head.transfer_size[1] =
			(vub300->datasize >> 16) & 0xFF;
		vub300->cmnd.head.transfer_size[2] =
			(vub300->datasize >> 8) & 0xFF;
		vub300->cmnd.head.transfer_size[3] =
			(vub300->datasize >> 0) & 0xFF;
		if (vub300->datasize < vub300->fbs[0]) {
			vub300->cmnd.head.block_count[0] = 0;
			vub300->cmnd.head.block_count[1] = 0;
		}
	}
	if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) {
		u16 block_size = vub300->cmnd.head.block_size[1] |
			(vub300->cmnd.head.block_size[0] << 8);
		u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY -
			(FIRMWARE_BLOCK_BOUNDARY % block_size);
		vub300->cmnd.head.block_boundary[0] =
			(block_boundary >> 8) & 0xFF;
		vub300->cmnd.head.block_boundary[1] =
			(block_boundary >> 0) & 0xFF;
	} else {
		vub300->cmnd.head.block_boundary[0] = 0;
		vub300->cmnd.head.block_boundary[1] = 0;
	}
	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev,
			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep),
			  &vub300->cmnd, sizeof(vub300->cmnd),
			  command_out_completed, vub300);
	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL);
	if (retval < 0) {
		cmd->error = retval;
		complete(&vub300->command_complete);
		return;
	} else {
		return;
	}
}

/*
 * timer callback runs in atomic mode
 * so it cannot call usb_kill_urb()
 */
static void vub300_sg_timed_out(struct timer_list *t)
{
	struct vub300_mmc_host *vub300 = from_timer(vub300, t,
						    sg_transfer_timer);
	vub300->usb_timed_out = 1;
	usb_sg_cancel(&vub300->sg_request);
	usb_unlink_urb(vub300->command_out_urb);
	usb_unlink_urb(vub300->command_res_urb);
}

static u16 roundup_to_multiple_of_64(u16 number)
{
	return 0xFFC0 & (0x3F + number);
}

/*
 * this is a separate function to solve the 80 column width restriction
 */
static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
					  const struct firmware *fw)
{
	u8 register_count = 0;
	u16 ts = 0;
	u16 interrupt_size = 0;
	const u8 *data = fw->data;
	int size = fw->size;
	u8 c;
	dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
		 vub300->vub_name);
	do {
		c = *data++;
	} while (size-- && c); /* skip comment */
	dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
		 vub300->vub_name);
	if (size < 4) {
		dev_err(&vub300->udev->dev,
			"corrupt offload pseudocode in firmware %s\n",
			vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt offload pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	interrupt_size += *data++;
	size -= 1;
	interrupt_size <<= 8;
	interrupt_size += *data++;
	size -= 1;
	if (interrupt_size < size) {
		u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
		if (xfer_buffer) {
			int retval;
			memcpy(xfer_buffer, data, interrupt_size);
			memset(xfer_buffer + interrupt_size, 0,
			       xfer_length - interrupt_size);
			size -= interrupt_size;
			data += interrupt_size;
			retval =
				usb_control_msg(vub300->udev,
						usb_sndctrlpipe(vub300->udev, 0),
						SET_INTERRUPT_PSEUDOCODE,
						USB_DIR_OUT | USB_TYPE_VENDOR |
						USB_RECIP_DEVICE, 0x0000, 0x0000,
						xfer_buffer, xfer_length, 1000);
			kfree(xfer_buffer);
			if (retval < 0)
				goto copy_error_message;
		} else {
			dev_err(&vub300->udev->dev,
				"not enough memory for xfer buffer to send"
				" INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
				vub300->vub_name);
			strncpy(vub300->vub_name,
				"SDIO interrupt pseudocode download failed",
				sizeof(vub300->vub_name));
			return;
		}
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt interrupt pseudocode in firmware %s %s\n",
			fw->data, vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	ts += *data++;
	size -= 1;
	ts <<= 8;
	ts += *data++;
	size -= 1;
	if (ts < size) {
		u16 xfer_length = roundup_to_multiple_of_64(ts);
		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
		if (xfer_buffer) {
			int retval;
			memcpy(xfer_buffer, data, ts);
			memset(xfer_buffer + ts, 0,
			       xfer_length - ts);
			size -= ts;
			data += ts;
			retval =
				usb_control_msg(vub300->udev,
						usb_sndctrlpipe(vub300->udev, 0),
						SET_TRANSFER_PSEUDOCODE,
						USB_DIR_OUT | USB_TYPE_VENDOR |
						USB_RECIP_DEVICE, 0x0000, 0x0000,
						xfer_buffer, xfer_length, 1000);
			kfree(xfer_buffer);
			if (retval < 0)
				goto copy_error_message;
		} else {
			dev_err(&vub300->udev->dev,
				"not enough memory for xfer buffer to send"
				" TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
				vub300->vub_name);
			strncpy(vub300->vub_name,
				"SDIO transfer pseudocode download failed",
				sizeof(vub300->vub_name));
			return;
		}
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt transfer pseudocode in firmware %s %s\n",
			fw->data, vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt transfer pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	register_count += *data++;
	size -= 1;
	if (register_count * 4 == size) {
		int I = vub300->dynamic_register_count = register_count;
		int i = 0;
		while (I--) {
			unsigned int func_num = 0;
			vub300->sdio_register[i].func_num = *data++;
			size -= 1;
			func_num += *data++;
			size -= 1;
			func_num <<= 8;
			func_num += *data++;
			size -= 1;
			func_num <<= 8;
			func_num += *data++;
			size -= 1;
			vub300->sdio_register[i].sdio_reg = func_num;
			vub300->sdio_register[i].activate = 1;
			vub300->sdio_register[i].prepared = 0;
			i += 1;
		}
		dev_info(&vub300->udev->dev,
			 "initialized %d dynamic pseudocode registers\n",
			 vub300->dynamic_register_count);
		return;
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt dynamic registers in firmware %s\n",
			vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt dynamic registers",
			sizeof(vub300->vub_name));
		return;
	}

	return;

copy_error_message:
	strncpy(vub300->vub_name, "SDIO pseudocode download failed",
		sizeof(vub300->vub_name));
}

/*
 * if the binary containing the EMPTY PseudoCode can not be found
 * vub300->vub_name is set anyway in order to prevent an automatic retry
 */
static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
{
	struct mmc_card *card = vub300->mmc->card;
	int sdio_funcs = card->sdio_funcs;
	const struct firmware *fw = NULL;
	int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name),
			 "vub_%04X%04X", card->cis.vendor, card->cis.device);
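	/*
	 * the firmware name is "vub_<vendor><device>" for the card, followed
	 * by one "_<vendor><device>" per SDIO function and a ".bin" suffix
	 */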
	int n = 0;
	int retval;
	for (n = 0; n < sdio_funcs; n++) {
		struct sdio_func *sf = card->sdio_func[n];
		l += scnprintf(vub300->vub_name + l,
			       sizeof(vub300->vub_name) - l, "_%04X%04X",
			       sf->vendor, sf->device);
	}
	snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
	dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
		 vub300->vub_name);
	retval = request_firmware(&fw, vub300->vub_name, &card->dev);
	if (retval < 0) {
		strncpy(vub300->vub_name, "vub_default.bin",
			sizeof(vub300->vub_name));
		retval = request_firmware(&fw, vub300->vub_name, &card->dev);
		if (retval < 0) {
			strncpy(vub300->vub_name,
				"no SDIO offload firmware found",
				sizeof(vub300->vub_name));
		} else {
			__download_offload_pseudocode(vub300, fw);
			release_firmware(fw);
		}
	} else {
		__download_offload_pseudocode(vub300, fw);
		release_firmware(fw);
	}
}

static void vub300_usb_bulk_msg_completion(struct urb *urb)
{				/* urb completion handler - hardirq */
	complete((struct completion *)urb->context);
}

static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300,
			       unsigned int pipe, void *data, int len,
			       int *actual_length, int timeout_msecs)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	struct usb_device *usb_dev = vub300->udev;
	struct completion done;
	int retval;
	vub300->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!vub300->urb)
		return -ENOMEM;
	usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len,
			  vub300_usb_bulk_msg_completion, NULL);
	init_completion(&done);
	vub300->urb->context = &done;
	vub300->urb->actual_length = 0;
	retval = usb_submit_urb(vub300->urb, GFP_KERNEL);
	if (unlikely(retval))
		goto out;
	if (!wait_for_completion_timeout
	    (&done, msecs_to_jiffies(timeout_msecs))) {
		retval = -ETIMEDOUT;
		usb_kill_urb(vub300->urb);
	} else {
		retval = vub300->urb->status;
	}
out:
	*actual_length = vub300->urb->actual_length;
	usb_free_urb(vub300->urb);
	vub300->urb = NULL;
	return retval;
}

static int __command_read_data(struct vub300_mmc_host *vub300,
			       struct mmc_command *cmd, struct mmc_data *data)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	int linear_length = vub300->datasize;
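	/*
	 * round the input transfer up to a whole bulk packet: 512 bytes
	 * when the endpoints use large (high-speed sized) packets,
	 * otherwise 64 bytes
	 */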
	int padded_length = vub300->large_usb_packets ?
		((511 + linear_length) >> 9) << 9 :
		((63 + linear_length) >> 6) << 6;
	if ((padded_length == linear_length) || !pad_input_to_usb_pkt) {
		int result;
		unsigned pipe;
		pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep);
		result = usb_sg_init(&vub300->sg_request, vub300->udev,
				     pipe, 0, data->sg,
				     data->sg_len, 0, GFP_KERNEL);
		if (result < 0) {
			usb_unlink_urb(vub300->command_out_urb);
			usb_unlink_urb(vub300->command_res_urb);
			cmd->error = result;
			data->bytes_xfered = 0;
			return 0;
		} else {
			vub300->sg_transfer_timer.expires =
				jiffies + msecs_to_jiffies(2000 +
						(linear_length / 16384));
			add_timer(&vub300->sg_transfer_timer);
			usb_sg_wait(&vub300->sg_request);
			del_timer(&vub300->sg_transfer_timer);
			if (vub300->sg_request.status < 0) {
				cmd->error = vub300->sg_request.status;
				data->bytes_xfered = 0;
				return 0;
			} else {
				data->bytes_xfered = vub300->datasize;
				return linear_length;
			}
		}
	} else {
		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
		if (buf) {
			int result;
			unsigned pipe = usb_rcvbulkpipe(vub300->udev,
							vub300->data_inp_ep);
			int actual_length = 0;
			result = vub300_usb_bulk_msg(vub300, pipe, buf,
						     padded_length, &actual_length,
						     2000 + (padded_length / 16384));
			if (result < 0) {
				cmd->error = result;
				data->bytes_xfered = 0;
				kfree(buf);
				return 0;
			} else if (actual_length < linear_length) {
				cmd->error = -EREMOTEIO;
				data->bytes_xfered = 0;
				kfree(buf);
				return 0;
			} else {
				sg_copy_from_buffer(data->sg, data->sg_len, buf,
						    linear_length);
				kfree(buf);
				data->bytes_xfered = vub300->datasize;
				return linear_length;
			}
		} else {
			cmd->error = -ENOMEM;
			data->bytes_xfered = 0;
			return 0;
		}
	}
}

static int __command_write_data(struct vub300_mmc_host *vub300,
				struct mmc_command *cmd, struct mmc_data *data)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep);
	int linear_length = vub300->datasize;
	int modulo_64_length = linear_length & 0x003F;
	int modulo_512_length = linear_length & 0x01FF;
	if (linear_length < 64) {
		int result;
		int actual_length;
		sg_copy_to_buffer(data->sg, data->sg_len,
				  vub300->padded_buffer,
				  sizeof(vub300->padded_buffer));
		memset(vub300->padded_buffer + linear_length, 0,
		       sizeof(vub300->padded_buffer) - linear_length);
		result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer,
					     sizeof(vub300->padded_buffer),
					     &actual_length, 2000 +
					     (sizeof(vub300->padded_buffer) /
					      16384));
		if (result < 0) {
			cmd->error = result;
			data->bytes_xfered = 0;
		} else {
			data->bytes_xfered = vub300->datasize;
		}
	} else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) ||
		   (vub300->large_usb_packets && (64 > modulo_512_length))
		   ) { /* don't you just love these work-rounds */
		int padded_length = ((63 + linear_length) >> 6) << 6;
		u8 *buf = kmalloc(padded_length, GFP_KERNEL);
		if (buf) {
			int result;
			int actual_length;
			sg_copy_to_buffer(data->sg, data->sg_len, buf,
					  padded_length);
			memset(buf + linear_length, 0,
			       padded_length - linear_length);
			result =
				vub300_usb_bulk_msg(vub300, pipe, buf,
						    padded_length, &actual_length,
						    2000 + padded_length / 16384);
			kfree(buf);
			if (result < 0) {
				cmd->error = result;
				data->bytes_xfered = 0;
			} else {
				data->bytes_xfered = vub300->datasize;
			}
		} else {
			cmd->error = -ENOMEM;
			data->bytes_xfered = 0;
		}
	} else { /* no data padding required */
		int result;
		unsigned char buf[64 * 4];
		sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
		result = usb_sg_init(&vub300->sg_request, vub300->udev,
				     pipe, 0, data->sg,
				     data->sg_len, 0, GFP_KERNEL);
		if (result < 0) {
			usb_unlink_urb(vub300->command_out_urb);
			usb_unlink_urb(vub300->command_res_urb);
			cmd->error = result;
			data->bytes_xfered = 0;
		} else {
			vub300->sg_transfer_timer.expires =
				jiffies + msecs_to_jiffies(2000 +
						linear_length / 16384);
			add_timer(&vub300->sg_transfer_timer);
			usb_sg_wait(&vub300->sg_request);
			if (cmd->error) {
				data->bytes_xfered = 0;
			} else {
				del_timer(&vub300->sg_transfer_timer);
				if (vub300->sg_request.status < 0) {
					cmd->error = vub300->sg_request.status;
					data->bytes_xfered = 0;
				} else {
					data->bytes_xfered = vub300->datasize;
				}
			}
		}
	}
	return linear_length;
}

static void __vub300_command_response(struct vub300_mmc_host *vub300,
				      struct mmc_command *cmd,
				      struct mmc_data *data, int data_length)
{
	/* cmd_mutex is held by vub300_cmndwork_thread */
	long respretval;
	int msec_timeout = 1000 + data_length / 4;
	respretval =
		wait_for_completion_timeout(&vub300->command_complete,
					    msecs_to_jiffies(msec_timeout));
	if (respretval == 0) { /* TIMED OUT */
		/* we don't know which of "out" and "res" if any failed */
		int result;
		vub300->usb_timed_out = 1;
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
		cmd->error = -ETIMEDOUT;
		result = usb_lock_device_for_reset(vub300->udev,
						   vub300->interface);
		if (result == 0) {
			result = usb_reset_device(vub300->udev);
			usb_unlock_device(vub300->udev);
		}
	} else if (respretval < 0) {
		/* we don't know which of "out" and "res" if any failed */
		usb_kill_urb(vub300->command_out_urb);
		usb_kill_urb(vub300->command_res_urb);
		cmd->error = respretval;
	} else if (cmd->error) {
		/*
		 * the error occurred sending the command
		 * or receiving the response
		 */
	} else if (vub300->command_out_urb->status) {
		vub300->usb_transport_fail = vub300->command_out_urb->status;
		cmd->error = -EPROTO == vub300->command_out_urb->status ?
			-ESHUTDOWN : vub300->command_out_urb->status;
	} else if (vub300->command_res_urb->status) {
		vub300->usb_transport_fail = vub300->command_res_urb->status;
		cmd->error = -EPROTO == vub300->command_res_urb->status ?
			-ESHUTDOWN : vub300->command_res_urb->status;
	} else if (vub300->resp.common.header_type == 0x00) {
		/*
		 * the command completed successfully
		 * and there was no piggybacked data
		 */
	} else if (vub300->resp.common.header_type == RESPONSE_ERROR) {
		cmd->error =
			vub300_response_error(vub300->resp.error.error_code);
		if (vub300->data)
			usb_sg_cancel(&vub300->sg_request);
	} else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) {
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) {
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			vub300->irqs_queued += 1;
		} else if (vub300->irq_enabled) {
			vub300->irqs_queued += 1;
			vub300_queue_poll_work(vub300, 0);
		} else {
			vub300->irqs_queued += 1;
		}
		vub300->irq_disabled = 1;
		mutex_unlock(&vub300->irq_mutex);
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) {
		int offloaded_data_length =
			vub300->resp.common.header_size -
			sizeof(struct sd_register_header);
		int register_count = offloaded_data_length >> 3;
		int ri = 0;
		while (register_count--) {
			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]);
			ri += 1;
		}
		mutex_lock(&vub300->irq_mutex);
		if (vub300->irqs_queued) {
			vub300->irqs_queued += 1;
		} else if (vub300->irq_enabled) {
			vub300->irqs_queued += 1;
			vub300_queue_poll_work(vub300, 0);
		} else {
			vub300->irqs_queued += 1;
		}
		vub300->irq_disabled = 0;
		mutex_unlock(&vub300->irq_mutex);
		vub300->resp.common.header_size =
			sizeof(struct sd_register_header);
		vub300->resp.common.header_type = 0x00;
		cmd->error = 0;
	} else {
		cmd->error = -EINVAL;
	}
}

static void construct_request_response(struct vub300_mmc_host *vub300,
				       struct mmc_command *cmd)
{
	int resp_len = vub300->resp_len;
	int less_cmd = (17 == resp_len) ? resp_len : resp_len - 1;
	int bytes = 3 & less_cmd;
	int words = less_cmd >> 2;
	u8 *r = vub300->resp.response.command_response;

	if (!resp_len)
		return;
	if (bytes == 3) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24)
			| (r[2 + (words << 2)] << 16)
			| (r[3 + (words << 2)] << 8);
	} else if (bytes == 2) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24)
			| (r[2 + (words << 2)] << 16);
	} else if (bytes == 1) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24);
	}
	while (words-- > 0) {
		cmd->resp[words] = (r[1 + (words << 2)] << 24)
			| (r[2 + (words << 2)] << 16)
			| (r[3 + (words << 2)] << 8)
			| (r[4 + (words << 2)] << 0);
	}
	if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0]))
		cmd->resp[0] &= 0xFFFFFF00;
}

/* this thread runs only when there is an upper level command req outstanding */
static void vub300_cmndwork_thread(struct work_struct *work)
{
	struct vub300_mmc_host *vub300 =
		container_of(work, struct vub300_mmc_host, cmndwork);
	if (!vub300->interface) {
		kref_put(&vub300->kref, vub300_delete);
		return;
	} else {
		struct mmc_request *req = vub300->req;
		struct mmc_command *cmd = vub300->cmd;
		struct mmc_data *data = vub300->data;
		int data_length;
		mutex_lock(&vub300->cmd_mutex);
		init_completion(&vub300->command_complete);
		if (likely(vub300->vub_name[0]) || !vub300->mmc->card) {
			/*
			 * the name of the EMPTY Pseudo firmware file
			 * is used as a flag to indicate that the file
			 * has been already downloaded to the VUB300 chip
			 */
		} else if (0 == vub300->mmc->card->sdio_funcs) {
			strncpy(vub300->vub_name, "SD memory device",
				sizeof(vub300->vub_name));
		} else {
			download_offload_pseudocode(vub300);
		}
		send_command(vub300);
		if (!data)
			data_length = 0;
		else if (MMC_DATA_READ & data->flags)
			data_length = __command_read_data(vub300, cmd, data);
		else
			data_length = __command_write_data(vub300, cmd, data);
		__vub300_command_response(vub300, cmd, data, data_length);
		vub300->req = NULL;
		vub300->cmd = NULL;
		vub300->data = NULL;
		if (cmd->error) {
			if (cmd->error == -ENOMEDIUM)
				check_vub300_port_status(vub300);
			mutex_unlock(&vub300->cmd_mutex);
			mmc_request_done(vub300->mmc, req);
			kref_put(&vub300->kref, vub300_delete);
			return;
		} else {
			construct_request_response(vub300, cmd);
			vub300->resp_len = 0;
			mutex_unlock(&vub300->cmd_mutex);
			kref_put(&vub300->kref, vub300_delete);
			mmc_request_done(vub300->mmc, req);
			return;
		}
	}
}

static int examine_cyclic_buffer(struct vub300_mmc_host *vub300,
				 struct mmc_command *cmd, u8 Function)
{
	/* cmd_mutex is held by vub300_mmc_request */
	u8 cmd0 = 0xFF & (cmd->arg >> 24);
	u8 cmd1 = 0xFF & (cmd->arg >> 16);
	u8 cmd2 = 0xFF & (cmd->arg >> 8);
	u8 cmd3 = 0xFF & (cmd->arg >> 0);
	int first = MAXREGMASK & vub300->fn[Function].offload_point;
	struct offload_registers_access *rf = &vub300->fn[Function].reg[first];
	if (cmd0 == rf->command_byte[0] &&
	    cmd1 == rf->command_byte[1] &&
	    cmd2 == rf->command_byte[2] &&
	    cmd3 == rf->command_byte[3]) {
		u8 checksum = 0x00;
		cmd->resp[1] = checksum << 24;
		cmd->resp[0] = (rf->Respond_Byte[0] << 24)
			| (rf->Respond_Byte[1] << 16)
			| (rf->Respond_Byte[2] << 8)
			| (rf->Respond_Byte[3] << 0);
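		/* pop the matched entry off the per-function offload FIFO */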
vub300->fn[Function].offload_point += 1; 1818 vub300->fn[Function].offload_count -= 1; 1819 vub300->total_offload_count -= 1; 1820 return 1; 1821 } else { 1822 int delta = 1; /* because it does not match the first one */ 1823 u8 register_count = vub300->fn[Function].offload_count - 1; 1824 u32 register_point = vub300->fn[Function].offload_point + 1; 1825 while (0 < register_count) { 1826 int point = MAXREGMASK & register_point; 1827 struct offload_registers_access *r = 1828 &vub300->fn[Function].reg[point]; 1829 if (cmd0 == r->command_byte[0] && 1830 cmd1 == r->command_byte[1] && 1831 cmd2 == r->command_byte[2] && 1832 cmd3 == r->command_byte[3]) { 1833 u8 checksum = 0x00; 1834 cmd->resp[1] = checksum << 24; 1835 cmd->resp[0] = (r->Respond_Byte[0] << 24) 1836 | (r->Respond_Byte[1] << 16) 1837 | (r->Respond_Byte[2] << 8) 1838 | (r->Respond_Byte[3] << 0); 1839 vub300->fn[Function].offload_point += delta; 1840 vub300->fn[Function].offload_count -= delta; 1841 vub300->total_offload_count -= delta; 1842 return 1; 1843 } else { 1844 register_point += 1; 1845 register_count -= 1; 1846 delta += 1; 1847 continue; 1848 } 1849 } 1850 return 0; 1851 } 1852} 1853 1854static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300, 1855 struct mmc_command *cmd) 1856{ 1857 /* cmd_mutex is held by vub300_mmc_request */ 1858 u8 regs = vub300->dynamic_register_count; 1859 u8 i = 0; 1860 u8 func = FUN(cmd); 1861 u32 reg = REG(cmd); 1862 while (0 < regs--) { 1863 if ((vub300->sdio_register[i].func_num == func) && 1864 (vub300->sdio_register[i].sdio_reg == reg)) { 1865 if (!vub300->sdio_register[i].prepared) { 1866 return 0; 1867 } else if ((0x80000000 & cmd->arg) == 0x80000000) { 1868 /* 1869 * a write to a dynamic register 1870 * nullifies our offloaded value 1871 */ 1872 vub300->sdio_register[i].prepared = 0; 1873 return 0; 1874 } else { 1875 u8 checksum = 0x00; 1876 u8 rsp0 = 0x00; 1877 u8 rsp1 = 0x00; 1878 u8 rsp2 = vub300->sdio_register[i].response; 1879 u8 rsp3 = vub300->sdio_register[i].regvalue; 1880 vub300->sdio_register[i].prepared = 0; 1881 cmd->resp[1] = checksum << 24; 1882 cmd->resp[0] = (rsp0 << 24) 1883 | (rsp1 << 16) 1884 | (rsp2 << 8) 1885 | (rsp3 << 0); 1886 return 1; 1887 } 1888 } else { 1889 i += 1; 1890 continue; 1891 } 1892 } 1893 if (vub300->total_offload_count == 0) 1894 return 0; 1895 else if (vub300->fn[func].offload_count == 0) 1896 return 0; 1897 else 1898 return examine_cyclic_buffer(vub300, cmd, func); 1899} 1900 1901static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req) 1902{ /* NOT irq */ 1903 struct mmc_command *cmd = req->cmd; 1904 struct vub300_mmc_host *vub300 = mmc_priv(mmc); 1905 if (!vub300->interface) { 1906 cmd->error = -ESHUTDOWN; 1907 mmc_request_done(mmc, req); 1908 return; 1909 } else { 1910 struct mmc_data *data = req->data; 1911 if (!vub300->card_powered) { 1912 cmd->error = -ENOMEDIUM; 1913 mmc_request_done(mmc, req); 1914 return; 1915 } 1916 if (!vub300->card_present) { 1917 cmd->error = -ENOMEDIUM; 1918 mmc_request_done(mmc, req); 1919 return; 1920 } 1921 if (vub300->usb_transport_fail) { 1922 cmd->error = vub300->usb_transport_fail; 1923 mmc_request_done(mmc, req); 1924 return; 1925 } 1926 if (!vub300->interface) { 1927 cmd->error = -ENODEV; 1928 mmc_request_done(mmc, req); 1929 return; 1930 } 1931 kref_get(&vub300->kref); 1932 mutex_lock(&vub300->cmd_mutex); 1933 mod_timer(&vub300->inactivity_timer, jiffies + HZ); 1934 /* 1935 * for performance we have to return immediately 1936 * if the requested data has been offloaded 
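* (i.e. a CMD52 register read whose value is already held in the dynamic register cache or the offload cyclic buffer)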
1937 */ 1938 if (cmd->opcode == 52 && 1939 satisfy_request_from_offloaded_data(vub300, cmd)) { 1940 cmd->error = 0; 1941 mutex_unlock(&vub300->cmd_mutex); 1942 kref_put(&vub300->kref, vub300_delete); 1943 mmc_request_done(mmc, req); 1944 return; 1945 } else { 1946 vub300->cmd = cmd; 1947 vub300->req = req; 1948 vub300->data = data; 1949 if (data) 1950 vub300->datasize = data->blksz * data->blocks; 1951 else 1952 vub300->datasize = 0; 1953 vub300_queue_cmnd_work(vub300); 1954 mutex_unlock(&vub300->cmd_mutex); 1955 kref_put(&vub300->kref, vub300_delete); 1956 /* 1957 * the kernel lock diagnostics complain 1958 * if the cmd_mutex * is "passed on" 1959 * to the cmndwork thread, 1960 * so we must release it now 1961 * and re-acquire it in the cmndwork thread 1962 */ 1963 } 1964 } 1965} 1966 1967static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8], 1968 struct mmc_ios *ios) 1969{ 1970 int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! */ 1971 int retval; 1972 u32 kHzClock; 1973 if (ios->clock >= 48000000) 1974 kHzClock = 48000; 1975 else if (ios->clock >= 24000000) 1976 kHzClock = 24000; 1977 else if (ios->clock >= 20000000) 1978 kHzClock = 20000; 1979 else if (ios->clock >= 15000000) 1980 kHzClock = 15000; 1981 else if (ios->clock >= 200000) 1982 kHzClock = 200; 1983 else 1984 kHzClock = 0; 1985 { 1986 int i; 1987 u64 c = kHzClock; 1988 for (i = 0; i < buf_array_size; i++) { 1989 buf[i] = c; 1990 c >>= 8; 1991 } 1992 } 1993 retval = 1994 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), 1995 SET_CLOCK_SPEED, 1996 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1997 0x00, 0x00, buf, buf_array_size, 1000); 1998 if (retval != 8) { 1999 dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" 2000 " %dkHz failed with retval=%d\n", kHzClock, retval); 2001 } else { 2002 dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED" 2003 " %dkHz\n", kHzClock); 2004 } 2005} 2006 2007static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2008{ /* NOT irq */ 2009 struct vub300_mmc_host *vub300 = mmc_priv(mmc); 2010 if (!vub300->interface) 2011 return; 2012 kref_get(&vub300->kref); 2013 mutex_lock(&vub300->cmd_mutex); 2014 if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) { 2015 vub300->card_powered = 0; 2016 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), 2017 SET_SD_POWER, 2018 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 2019 0x0000, 0x0000, NULL, 0, 1000); 2020 /* must wait for the VUB300 u-proc to boot up */ 2021 msleep(600); 2022 } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { 2023 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), 2024 SET_SD_POWER, 2025 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 2026 0x0001, 0x0000, NULL, 0, 1000); 2027 msleep(600); 2028 vub300->card_powered = 1; 2029 } else if (ios->power_mode == MMC_POWER_ON) { 2030 u8 *buf = kmalloc(8, GFP_KERNEL); 2031 if (buf) { 2032 __set_clock_speed(vub300, buf, ios); 2033 kfree(buf); 2034 } 2035 } else { 2036 /* this should mean no change of state */ 2037 } 2038 mutex_unlock(&vub300->cmd_mutex); 2039 kref_put(&vub300->kref, vub300_delete); 2040} 2041 2042static int vub300_mmc_get_ro(struct mmc_host *mmc) 2043{ 2044 struct vub300_mmc_host *vub300 = mmc_priv(mmc); 2045 return vub300->read_only; 2046} 2047 2048static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) 2049{ /* NOT irq */ 2050 struct vub300_mmc_host *vub300 = mmc_priv(mmc); 2051 if (!vub300->interface) 2052 return; 2053 kref_get(&vub300->kref); 2054 if 
(enable) { 2055 set_current_state(TASK_RUNNING); 2056 mutex_lock(&vub300->irq_mutex); 2057 if (vub300->irqs_queued) { 2058 vub300->irqs_queued -= 1; 2059 mmc_signal_sdio_irq(vub300->mmc); 2060 } else if (vub300->irq_disabled) { 2061 vub300->irq_disabled = 0; 2062 vub300->irq_enabled = 1; 2063 vub300_queue_poll_work(vub300, 0); 2064 } else if (vub300->irq_enabled) { 2065 /* this should not happen, so we will just ignore it */ 2066 } else { 2067 vub300->irq_enabled = 1; 2068 vub300_queue_poll_work(vub300, 0); 2069 } 2070 mutex_unlock(&vub300->irq_mutex); 2071 set_current_state(TASK_INTERRUPTIBLE); 2072 } else { 2073 vub300->irq_enabled = 0; 2074 } 2075 kref_put(&vub300->kref, vub300_delete); 2076} 2077 2078static const struct mmc_host_ops vub300_mmc_ops = { 2079 .request = vub300_mmc_request, 2080 .set_ios = vub300_mmc_set_ios, 2081 .get_ro = vub300_mmc_get_ro, 2082 .enable_sdio_irq = vub300_enable_sdio_irq, 2083}; 2084 2085static int vub300_probe(struct usb_interface *interface, 2086 const struct usb_device_id *id) 2087{ /* NOT irq */ 2088 struct vub300_mmc_host *vub300; 2089 struct usb_host_interface *iface_desc; 2090 struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); 2091 int i; 2092 int retval = -ENOMEM; 2093 struct urb *command_out_urb; 2094 struct urb *command_res_urb; 2095 struct mmc_host *mmc; 2096 char manufacturer[48]; 2097 char product[32]; 2098 char serial_number[32]; 2099 usb_string(udev, udev->descriptor.iManufacturer, manufacturer, 2100 sizeof(manufacturer)); 2101 usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); 2102 usb_string(udev, udev->descriptor.iSerialNumber, serial_number, 2103 sizeof(serial_number)); 2104 dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", 2105 le16_to_cpu(udev->descriptor.idVendor), 2106 le16_to_cpu(udev->descriptor.idProduct), 2107 manufacturer, product, serial_number); 2108 command_out_urb = usb_alloc_urb(0, GFP_KERNEL); 2109 if (!command_out_urb) { 2110 retval = -ENOMEM; 2111 goto error0; 2112 } 2113 command_res_urb = usb_alloc_urb(0, GFP_KERNEL); 2114 if (!command_res_urb) { 2115 retval = -ENOMEM; 2116 goto error1; 2117 } 2118 /* this also allocates memory for our VUB300 mmc host device */ 2119 mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); 2120 if (!mmc) { 2121 retval = -ENOMEM; 2122 dev_err(&udev->dev, "not enough memory for the mmc_host\n"); 2123 goto error4; 2124 } 2125 /* MMC core transfer sizes tunable parameters */ 2126 mmc->caps = 0; 2127 if (!force_1_bit_data_xfers) 2128 mmc->caps |= MMC_CAP_4_BIT_DATA; 2129 if (!force_polling_for_irqs) 2130 mmc->caps |= MMC_CAP_SDIO_IRQ; 2131 mmc->caps &= ~MMC_CAP_NEEDS_POLL; 2132 /* 2133 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll 2134 * for devices which results in spurious CMD7's being 2135 * issued which stops some SDIO cards from working 2136 */ 2137 if (limit_speed_to_24_MHz) { 2138 mmc->caps |= MMC_CAP_MMC_HIGHSPEED; 2139 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2140 mmc->f_max = 24000000; 2141 dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n"); 2142 } else { 2143 mmc->caps |= MMC_CAP_MMC_HIGHSPEED; 2144 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2145 mmc->f_max = 48000000; 2146 } 2147 mmc->f_min = 200000; 2148 mmc->max_blk_count = 511; 2149 mmc->max_blk_size = 512; 2150 mmc->max_segs = 128; 2151 if (force_max_req_size) 2152 mmc->max_req_size = force_max_req_size * 1024; 2153 else 2154 mmc->max_req_size = 64 * 1024; 2155 mmc->max_seg_size = mmc->max_req_size; 2156 mmc->ocr_avail = 0; 2157 mmc->ocr_avail |= MMC_VDD_165_195; 
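/* ...and every other voltage window up to 3.6V: the VUB300 slot power is only switched on and off (SET_SD_POWER), so the full range is advertised to the MMC core */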
2158 mmc->ocr_avail |= MMC_VDD_20_21; 2159 mmc->ocr_avail |= MMC_VDD_21_22; 2160 mmc->ocr_avail |= MMC_VDD_22_23; 2161 mmc->ocr_avail |= MMC_VDD_23_24; 2162 mmc->ocr_avail |= MMC_VDD_24_25; 2163 mmc->ocr_avail |= MMC_VDD_25_26; 2164 mmc->ocr_avail |= MMC_VDD_26_27; 2165 mmc->ocr_avail |= MMC_VDD_27_28; 2166 mmc->ocr_avail |= MMC_VDD_28_29; 2167 mmc->ocr_avail |= MMC_VDD_29_30; 2168 mmc->ocr_avail |= MMC_VDD_30_31; 2169 mmc->ocr_avail |= MMC_VDD_31_32; 2170 mmc->ocr_avail |= MMC_VDD_32_33; 2171 mmc->ocr_avail |= MMC_VDD_33_34; 2172 mmc->ocr_avail |= MMC_VDD_34_35; 2173 mmc->ocr_avail |= MMC_VDD_35_36; 2174 mmc->ops = &vub300_mmc_ops; 2175 vub300 = mmc_priv(mmc); 2176 vub300->mmc = mmc; 2177 vub300->card_powered = 0; 2178 vub300->bus_width = 0; 2179 vub300->cmnd.head.block_size[0] = 0x00; 2180 vub300->cmnd.head.block_size[1] = 0x00; 2181 vub300->app_spec = 0; 2182 mutex_init(&vub300->cmd_mutex); 2183 mutex_init(&vub300->irq_mutex); 2184 vub300->command_out_urb = command_out_urb; 2185 vub300->command_res_urb = command_res_urb; 2186 vub300->usb_timed_out = 0; 2187 vub300->dynamic_register_count = 0; 2188 2189 for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { 2190 vub300->fn[i].offload_point = 0; 2191 vub300->fn[i].offload_count = 0; 2192 } 2193 2194 vub300->total_offload_count = 0; 2195 vub300->irq_enabled = 0; 2196 vub300->irq_disabled = 0; 2197 vub300->irqs_queued = 0; 2198 2199 for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++) 2200 vub300->sdio_register[i].activate = 0; 2201 2202 vub300->udev = udev; 2203 vub300->interface = interface; 2204 vub300->cmnd_res_ep = 0; 2205 vub300->cmnd_out_ep = 0; 2206 vub300->data_inp_ep = 0; 2207 vub300->data_out_ep = 0; 2208 2209 for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) 2210 vub300->fbs[i] = 512; 2211 2212 /* 2213 * set up the endpoint information 2214 * 2215 * use the first pair of bulk-in and bulk-out 2216 * endpoints for Command/Response+Interrupt 2217 * 2218 * use the second pair of bulk-in and bulk-out 2219 * endpoints for Data In/Out 2220 */ 2221 vub300->large_usb_packets = 0; 2222 iface_desc = interface->cur_altsetting; 2223 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 2224 struct usb_endpoint_descriptor *endpoint = 2225 &iface_desc->endpoint[i].desc; 2226 dev_info(&vub300->udev->dev, 2227 "vub300 testing %s EndPoint(%d) %02X\n", 2228 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" : 2229 usb_endpoint_is_bulk_out(endpoint) ?
"BULK OUT" : 2230 "UNKNOWN", i, endpoint->bEndpointAddress); 2231 if (endpoint->wMaxPacketSize > 64) 2232 vub300->large_usb_packets = 1; 2233 if (usb_endpoint_is_bulk_in(endpoint)) { 2234 if (!vub300->cmnd_res_ep) { 2235 vub300->cmnd_res_ep = 2236 endpoint->bEndpointAddress; 2237 } else if (!vub300->data_inp_ep) { 2238 vub300->data_inp_ep = 2239 endpoint->bEndpointAddress; 2240 } else { 2241 dev_warn(&vub300->udev->dev, 2242 "ignoring" 2243 " unexpected bulk_in endpoint"); 2244 } 2245 } else if (usb_endpoint_is_bulk_out(endpoint)) { 2246 if (!vub300->cmnd_out_ep) { 2247 vub300->cmnd_out_ep = 2248 endpoint->bEndpointAddress; 2249 } else if (!vub300->data_out_ep) { 2250 vub300->data_out_ep = 2251 endpoint->bEndpointAddress; 2252 } else { 2253 dev_warn(&vub300->udev->dev, 2254 "ignoring" 2255 " unexpected bulk_out endpoint"); 2256 } 2257 } else { 2258 dev_warn(&vub300->udev->dev, 2259 "vub300 ignoring EndPoint(%d) %02X", i, 2260 endpoint->bEndpointAddress); 2261 } 2262 } 2263 if (vub300->cmnd_res_ep && vub300->cmnd_out_ep && 2264 vub300->data_inp_ep && vub300->data_out_ep) { 2265 dev_info(&vub300->udev->dev, 2266 "vub300 %s packets" 2267 " using EndPoints %02X %02X %02X %02X\n", 2268 vub300->large_usb_packets ? "LARGE" : "SMALL", 2269 vub300->cmnd_out_ep, vub300->cmnd_res_ep, 2270 vub300->data_out_ep, vub300->data_inp_ep); 2271 /* we have the expected EndPoints */ 2272 } else { 2273 dev_err(&vub300->udev->dev, 2274 "Could not find two sets of bulk-in/out endpoint pairs\n"); 2275 retval = -EINVAL; 2276 goto error5; 2277 } 2278 retval = 2279 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), 2280 GET_HC_INF0, 2281 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 2282 0x0000, 0x0000, &vub300->hc_info, 2283 sizeof(vub300->hc_info), 1000); 2284 if (retval < 0) 2285 goto error5; 2286 retval = 2287 usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), 2288 SET_ROM_WAIT_STATES, 2289 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 2290 firmware_rom_wait_states, 0x0000, NULL, 0, 1000); 2291 if (retval < 0) 2292 goto error5; 2293 dev_info(&vub300->udev->dev, 2294 "operating_mode = %s %s %d MHz %s %d byte USB packets\n", 2295 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL", 2296 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit", 2297 mmc->f_max / 1000000, 2298 pad_input_to_usb_pkt ? "padding input data to" : "with", 2299 vub300->large_usb_packets ? 512 : 64); 2300 retval = 2301 usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), 2302 GET_SYSTEM_PORT_STATUS, 2303 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 2304 0x0000, 0x0000, &vub300->system_port_status, 2305 sizeof(vub300->system_port_status), 1000); 2306 if (retval < 0) { 2307 goto error5; 2308 } else if (sizeof(vub300->system_port_status) == retval) { 2309 vub300->card_present = 2310 (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; 2311 vub300->read_only = 2312 (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; 2313 } else { 2314 retval = -EINVAL; 2315 goto error5; 2316 } 2317 usb_set_intfdata(interface, vub300); 2318 INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); 2319 INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); 2320 INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); 2321 kref_init(&vub300->kref); 2322 timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0); 2323 kref_get(&vub300->kref); 2324 timer_setup(&vub300->inactivity_timer, 2325 vub300_inactivity_timer_expired, 0); 2326 vub300->inactivity_timer.expires = jiffies + HZ; 2327 add_timer(&vub300->inactivity_timer); 2328 if (vub300->card_present) 2329 dev_info(&vub300->udev->dev, 2330 "USB vub300 remote SDIO host controller[%d]" 2331 "connected with SD/SDIO card inserted\n", 2332 interface_to_InterfaceNumber(interface)); 2333 else 2334 dev_info(&vub300->udev->dev, 2335 "USB vub300 remote SDIO host controller[%d]" 2336 "connected with no SD/SDIO card inserted\n", 2337 interface_to_InterfaceNumber(interface)); 2338 retval = mmc_add_host(mmc); 2339 if (retval) 2340 goto error6; 2341 2342 return 0; 2343error6: 2344 del_timer_sync(&vub300->inactivity_timer); 2345error5: 2346 mmc_free_host(mmc); 2347 /* 2348 * and hence also frees vub300 2349 * which is contained at the end of struct mmc 2350 */ 2351error4: 2352 usb_free_urb(command_res_urb); 2353error1: 2354 usb_free_urb(command_out_urb); 2355error0: 2356 usb_put_dev(udev); 2357 return retval; 2358} 2359 2360static void vub300_disconnect(struct usb_interface *interface) 2361{ /* NOT irq */ 2362 struct vub300_mmc_host *vub300 = usb_get_intfdata(interface); 2363 if (!vub300 || !vub300->mmc) { 2364 return; 2365 } else { 2366 struct mmc_host *mmc = vub300->mmc; 2367 if (!vub300->mmc) { 2368 return; 2369 } else { 2370 int ifnum = interface_to_InterfaceNumber(interface); 2371 usb_set_intfdata(interface, NULL); 2372 /* prevent more I/O from starting */ 2373 vub300->interface = NULL; 2374 kref_put(&vub300->kref, vub300_delete); 2375 mmc_remove_host(mmc); 2376 pr_info("USB vub300 remote SDIO host controller[%d]" 2377 " now disconnected", ifnum); 2378 return; 2379 } 2380 } 2381} 2382 2383#ifdef CONFIG_PM 2384static int vub300_suspend(struct usb_interface *intf, pm_message_t message) 2385{ 2386 return 0; 2387} 2388 2389static int vub300_resume(struct usb_interface *intf) 2390{ 2391 return 0; 2392} 2393#else 2394#define vub300_suspend NULL 2395#define vub300_resume NULL 2396#endif 2397static int vub300_pre_reset(struct usb_interface *intf) 2398{ /* NOT irq */ 2399 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); 2400 mutex_lock(&vub300->cmd_mutex); 2401 return 0; 2402} 2403 2404static int vub300_post_reset(struct usb_interface *intf) 2405{ /* NOT irq */ 2406 struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); 2407 /* we are sure no URBs are active - no locking needed */ 2408 vub300->errors = -EPIPE; 2409 mutex_unlock(&vub300->cmd_mutex); 2410 return 0; 2411} 2412 2413static struct usb_driver vub300_driver = { 2414 .name = "vub300", 2415 .probe = vub300_probe, 2416 .disconnect = vub300_disconnect, 2417 .suspend = vub300_suspend, 2418 .resume = vub300_resume, 2419 .pre_reset = vub300_pre_reset, 2420 .post_reset = vub300_post_reset, 2421 .id_table = vub300_table, 2422 .supports_autosuspend = 1, 2423}; 2424 2425static int __init vub300_init(void) 2426{ /* NOT irq */ 2427 int result; 2428 2429 pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X", 2430 firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout); 2431 cmndworkqueue = 
create_singlethread_workqueue("kvub300c"); 2432 if (!cmndworkqueue) { 2433 pr_err("not enough memory for the REQUEST workqueue"); 2434 result = -ENOMEM; 2435 goto out1; 2436 } 2437 pollworkqueue = create_singlethread_workqueue("kvub300p"); 2438 if (!pollworkqueue) { 2439 pr_err("not enough memory for the IRQPOLL workqueue"); 2440 result = -ENOMEM; 2441 goto out2; 2442 } 2443 deadworkqueue = create_singlethread_workqueue("kvub300d"); 2444 if (!deadworkqueue) { 2445 pr_err("not enough memory for the EXPIRED workqueue"); 2446 result = -ENOMEM; 2447 goto out3; 2448 } 2449 result = usb_register(&vub300_driver); 2450 if (result) { 2451 pr_err("usb_register failed. Error number %d", result); 2452 goto out4; 2453 } 2454 return 0; 2455out4: 2456 destroy_workqueue(deadworkqueue); 2457out3: 2458 destroy_workqueue(pollworkqueue); 2459out2: 2460 destroy_workqueue(cmndworkqueue); 2461out1: 2462 return result; 2463} 2464 2465static void __exit vub300_exit(void) 2466{ 2467 usb_deregister(&vub300_driver); 2468 flush_workqueue(cmndworkqueue); 2469 flush_workqueue(pollworkqueue); 2470 flush_workqueue(deadworkqueue); 2471 destroy_workqueue(cmndworkqueue); 2472 destroy_workqueue(pollworkqueue); 2473 destroy_workqueue(deadworkqueue); 2474} 2475 2476module_init(vub300_init); 2477module_exit(vub300_exit); 2478 2479MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>"); 2480MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver"); 2481MODULE_LICENSE("GPL"); 2482