// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 Intersil Americas Inc.
 *  Copyright 2004 Jens Maurer <Jens.Maurer@gmx.net>
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <linux/if_arp.h>

#include "prismcompat.h"
#include "isl_38xx.h"
#include "islpci_mgt.h"
#include "isl_oid.h"            /* additional types and defs for isl38xx fw */
#include "isl_ioctl.h"

#include <net/iw_handler.h>

/******************************************************************************
    Global variable definition section
******************************************************************************/
int pc_debug = VERBOSE;
module_param(pc_debug, int, 0);

/******************************************************************************
    Driver general functions
******************************************************************************/
#if VERBOSE > SHOW_ERROR_MESSAGES
void
display_buffer(char *buffer, int length)
{
        if ((pc_debug & SHOW_BUFFER_CONTENTS) == 0)
                return;

        while (length > 0) {
                printk("[%02x]", *buffer & 255);
                length--;
                buffer++;
        }

        printk("\n");
}
#endif

/******************************************************************************
    Queue handling for management frames
******************************************************************************/

/*
 * Helper function to create a PIMFOR management frame header.
 */
static void
pimfor_encode_header(int operation, u32 oid, u32 length, pimfor_header_t *h)
{
        h->version = PIMFOR_VERSION;
        h->operation = operation;
        h->device_id = PIMFOR_DEV_ID_MHLI_MIB;
        h->flags = 0;
        h->oid = cpu_to_be32(oid);
        h->length = cpu_to_be32(length);
}

/*
 * Helper function to analyze a PIMFOR management frame header.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
        pimfor_header_t *h = data;

        while ((void *) h < data + len) {
                if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
                        le32_to_cpus(&h->oid);
                        le32_to_cpus(&h->length);
                } else {
                        be32_to_cpus(&h->oid);
                        be32_to_cpus(&h->length);
                }
                if (h->oid != OID_INL_TUNNEL)
                        return h;
                h++;
        }
        return NULL;
}
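/*
 * A worked example of the in-place conversion above (a sketch; the exact
 * field layout is given by pimfor_header_t in islpci_mgt.h, where oid and
 * length are the only multi-byte fields, and PIMFOR_VERSION is assumed to
 * be 1): a frame for oid 0x12345678 with an 8-byte payload arrives on the
 * wire big-endian as
 *
 *      version  operation  oid               device_id  flags  length
 *      [01]     [op]       [12][34][56][78]  [00]       [00]   [00][00][00][08]
 *
 * and after pimfor_decode_header(), h->oid reads 0x12345678 and h->length
 * reads 8 in host byte order, whatever the host endianness is.
 */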
/*
 * Fill the receive queue for management frames with fresh buffers.
 */
int
islpci_mgmt_rx_fill(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =     /* volatile not needed */
            (isl38xx_control_block *) priv->control_block;
        u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
#endif

        while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
                u32 index = curr % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_rx[index];
                isl38xx_fragment *frag = &cb->rx_data_mgmt[index];

                if (buf->mem == NULL) {
                        buf->mem = kmalloc(MGMT_FRAME_SIZE, GFP_ATOMIC);
                        if (!buf->mem)
                                return -ENOMEM;
                        buf->size = MGMT_FRAME_SIZE;
                }
                if (buf->pci_addr == 0) {
                        buf->pci_addr = dma_map_single(&priv->pdev->dev,
                                                       buf->mem,
                                                       MGMT_FRAME_SIZE,
                                                       DMA_FROM_DEVICE);
                        if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) {
                                /* Do not leave the error cookie behind, or
                                 * the next fill pass would mistake this
                                 * buffer for an already-mapped one. */
                                buf->pci_addr = 0;
                                printk(KERN_WARNING
                                       "Failed to make memory DMA'able.\n");
                                return -ENOMEM;
                        }
                }

                /* be safe: always reset control block information */
                frag->size = cpu_to_le16(MGMT_FRAME_SIZE);
                frag->flags = 0;
                frag->address = cpu_to_le32(buf->pci_addr);
                curr++;

                /* The fragment address in the control block must have
                 * been written before announcing the frame buffer to
                 * the device. */
                wmb();
                cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr);
        }
        return 0;
}
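/*
 * Note on the ring arithmetic above (a sketch of why it is safe): curr and
 * priv->index_mgmt_rx are free-running u32 counters, never reduced modulo
 * the queue size, so their difference is computed modulo 2^32 and stays
 * correct across wraparound.  For example, with curr == 3 (already
 * wrapped) and index_mgmt_rx == 0xfffffffe, curr - index_mgmt_rx == 5:
 * five buffers are outstanding, and the loop keeps refilling until
 * ISL38XX_CB_MGMT_QSIZE buffers are owned by the device.
 */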
/*
 * Create and transmit a management frame using "operation" and "oid",
 * with arguments data/length.
 * We either return an error and free the frame, or we return 0 and
 * islpci_mgt_cleanup_transmit() frees the frame in the tx-done
 * interrupt.
 */
static int
islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
                    void *data, int length)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =
            (isl38xx_control_block *) priv->control_block;
        void *p;
        int err = -EINVAL;
        unsigned long flags;
        isl38xx_fragment *frag;
        struct islpci_membuf buf;
        u32 curr_frag;
        int index;
        int frag_len = length + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_transmit\n");
#endif

        if (frag_len > MGMT_FRAME_SIZE) {
                printk(KERN_DEBUG "%s: mgmt frame too large %d\n",
                       ndev->name, frag_len);
                goto error;
        }

        err = -ENOMEM;
        p = buf.mem = kmalloc(frag_len, GFP_KERNEL);
        if (!buf.mem)
                goto error;

        buf.size = frag_len;

        /* create the header directly in the fragment data area */
        pimfor_encode_header(operation, oid, length, (pimfor_header_t *) p);
        p += PIMFOR_HEADER_SIZE;

        if (data)
                memcpy(p, data, length);
        else
                memset(p, 0, length);

#if VERBOSE > SHOW_ERROR_MESSAGES
        {
                pimfor_header_t *h = buf.mem;
                DEBUG(SHOW_PIMFOR_FRAMES,
                      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
                      h->operation, oid, h->device_id, h->flags, length);

                /* display the buffer contents for debugging */
                display_buffer((char *) h, sizeof (pimfor_header_t));
                display_buffer(p, length);
        }
#endif

        err = -ENOMEM;
        buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) {
                printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
                       ndev->name);
                goto error_free;
        }

        /* Protect the control block modifications against interrupts. */
        spin_lock_irqsave(&priv->slock, flags);
        curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ]);
        if (curr_frag - priv->index_mgmt_tx >= ISL38XX_CB_MGMT_QSIZE) {
                printk(KERN_WARNING "%s: mgmt tx queue is still full\n",
                       ndev->name);
                goto error_unlock;
        }

        /* commit the frame to the tx device queue */
        index = curr_frag % ISL38XX_CB_MGMT_QSIZE;
        priv->mgmt_tx[index] = buf;
        frag = &cb->tx_data_mgmt[index];
        frag->size = cpu_to_le16(frag_len);
        frag->flags = 0;        /* for any other than the last fragment, set to 1 */
        frag->address = cpu_to_le32(buf.pci_addr);

        /* The fragment address in the control block must have
         * been written before announcing the frame buffer to
         * the device. */
        wmb();
        cb->driver_curr_frag[ISL38XX_CB_TX_MGMTQ] = cpu_to_le32(curr_frag + 1);
        spin_unlock_irqrestore(&priv->slock, flags);

        /* trigger the device */
        islpci_trigger(priv);
        return 0;

      error_unlock:
        spin_unlock_irqrestore(&priv->slock, flags);
        /* the mapping succeeded just above; undo it before freeing,
         * otherwise the queue-full path leaks the DMA mapping */
        dma_unmap_single(&priv->pdev->dev, buf.pci_addr, frag_len,
                         DMA_TO_DEVICE);
      error_free:
        kfree(buf.mem);
      error:
        return err;
}
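/*
 * Resulting frame layout (a sketch, assuming PIMFOR_HEADER_SIZE is 12,
 * which is what the pimfor_header_t fields add up to): for a 4-byte
 * payload, buf.mem holds
 *
 *      offset 0              offset PIMFOR_HEADER_SIZE
 *      | PIMFOR header (12) | payload (4) |        frag_len = 16
 *
 * The header's oid/length fields are big-endian on the wire (see
 * pimfor_encode_header()); payload endianness is the caller's business.
 */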
/*
 * Receive a management frame from the device.
 * This can be an arbitrary number of traps, and at most one response
 * frame for a previous request sent via islpci_mgt_transmit().
 */
int
islpci_mgt_receive(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =
            (isl38xx_control_block *) priv->control_block;
        u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
#endif

        /* Only once per interrupt, determine fragment range to
         * process. This avoids an endless loop (i.e. lockup) if
         * frames come in faster than we can process them. */
        curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
        barrier();

        for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
                pimfor_header_t *header;
                u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_rx[index];
                u16 frag_len;
                int size;
                struct islpci_mgmtframe *frame;

                /* I have no idea (and no documentation) if flags != 0
                 * is possible. Drop the frame, reuse the buffer. */
                if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
                        printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
                               ndev->name,
                               le16_to_cpu(cb->rx_data_mgmt[index].flags));
                        continue;
                }

                /* The device only returns the size of the header(s) here. */
                frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);

                /*
                 * We appear to have no way to tell the device the
                 * size of a receive buffer. Thus, if this check
                 * triggers, we likely have kernel heap corruption. */
                if (frag_len > MGMT_FRAME_SIZE) {
                        printk(KERN_WARNING
                               "%s: Bogus packet size of %d (%#x).\n",
                               ndev->name, frag_len, frag_len);
                        frag_len = MGMT_FRAME_SIZE;
                }

                /* Ensure the results of device DMA are visible to the CPU. */
                dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr,
                                        buf->size, DMA_FROM_DEVICE);

                /* Perform endianness conversion for the PIMFOR header in-place. */
                header = pimfor_decode_header(buf->mem, frag_len);
                if (!header) {
                        printk(KERN_WARNING "%s: no PIMFOR header found\n",
                               ndev->name);
                        continue;
                }

                /* The device ID from the PIMFOR packet received from
                 * the MVC is always 0. We forward a sensible device_id.
                 * Not that anyone upstream would care... */
                header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
                DEBUG(SHOW_PIMFOR_FRAMES,
                      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
                      header->operation, header->oid, header->device_id,
                      header->flags, header->length);

                /* display the buffer contents for debugging */
                display_buffer((char *) header, PIMFOR_HEADER_SIZE);
                display_buffer((char *) header + PIMFOR_HEADER_SIZE,
                               header->length);
#endif

                /* nobody sends these */
                if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
                        printk(KERN_DEBUG
                               "%s: errant PIMFOR application frame\n",
                               ndev->name);
                        continue;
                }
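                /*
                 * What remains below copies the frame out of the DMA
                 * buffer and dispatches it.  Two cases (sketch):
                 *
                 *   PIMFOR_OP_TRAP -> unsolicited; queued on the shared
                 *       workqueue and handled by prism54_process_trap()
                 *       in process context.
                 *   anything else  -> treated as the response to the one
                 *       outstanding request; stored in priv->mgmt_received
                 *       and the waiter on priv->mgmt_wqueue is woken.
                 */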
                /* header->length is device-controlled; refuse frames whose
                 * advertised payload does not fit within the receive
                 * buffer, or the memcpy below would read past buf->mem. */
                if (header->length > MGMT_FRAME_SIZE ||
                    (char *) header + PIMFOR_HEADER_SIZE + header->length >
                    (char *) buf->mem + MGMT_FRAME_SIZE) {
                        printk(KERN_WARNING "%s: bogus PIMFOR length %u\n",
                               ndev->name, header->length);
                        continue;
                }

                /* Determine frame size, skipping OID_INL_TUNNEL headers. */
                size = PIMFOR_HEADER_SIZE + header->length;
                frame = kmalloc(sizeof(struct islpci_mgmtframe) + size,
                                GFP_ATOMIC);
                if (!frame)
                        continue;

                frame->ndev = ndev;
                memcpy(&frame->buf, header, size);
                frame->header = (pimfor_header_t *) frame->buf;
                frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
                DEBUG(SHOW_PIMFOR_FRAMES,
                      "frame: header: %p, data: %p, size: %d\n",
                      frame->header, frame->data, size);
#endif

                if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
                        printk(KERN_DEBUG
                               "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
                               header->oid, header->device_id, header->flags,
                               header->length);
#endif

                        /* Create work to handle trap out of interrupt
                         * context. */
                        INIT_WORK(&frame->ws, prism54_process_trap);
                        schedule_work(&frame->ws);

                } else {
                        /* Signal the one waiting process that a response
                         * has been received. */
                        if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
                                printk(KERN_WARNING
                                       "%s: mgmt response not collected\n",
                                       ndev->name);
                                kfree(frame);
                        }
#if VERBOSE > SHOW_ERROR_MESSAGES
                        DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
                        wake_up(&priv->mgmt_wqueue);
                }

        }

        return 0;
}

/*
 * Cleanup the transmit queue by freeing all frames handled by the device.
 */
void
islpci_mgt_cleanup_transmit(struct net_device *ndev)
{
        islpci_private *priv = netdev_priv(ndev);
        isl38xx_control_block *cb =     /* volatile not needed */
            (isl38xx_control_block *) priv->control_block;
        u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
        DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_cleanup_transmit\n");
#endif

        /* Only once per cleanup, determine fragment range to
         * process. This avoids an endless loop (i.e. lockup) if
         * the device became confused, incrementing device_curr_frag
         * rapidly. */
        curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_TX_MGMTQ]);
        barrier();

        for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
                int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_tx[index];
                dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size,
                                 DMA_TO_DEVICE);
                buf->pci_addr = 0;
                kfree(buf->mem);
                buf->mem = NULL;
                buf->size = 0;
        }
}
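/*
 * Bookkeeping overview for both management queues (a sketch of the scheme
 * the code above relies on): the control block keeps two free-running
 * frame counters per queue.  driver_curr_frag[] counts fragments the
 * driver has handed to the device (written by the driver in
 * islpci_mgmt_rx_fill() and islpci_mgt_transmit()); device_curr_frag[]
 * counts fragments the device has finished with (written by the device).
 * priv->index_mgmt_rx/index_mgmt_tx record how far the driver has caught
 * up, so
 *
 *      device_curr_frag - index    fragments finished by the device,
 *                                  not yet processed by the driver
 *      driver_curr_frag - index    fragments in flight (queued and not
 *                                  yet reclaimed)
 *
 * All differences are modulo 2^32, and the ring slot for a counter value
 * is counter % ISL38XX_CB_MGMT_QSIZE.
 */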
/*
 * Perform one request-response transaction to the device.
 */
int
islpci_mgt_transaction(struct net_device *ndev,
                       int operation, unsigned long oid,
                       void *senddata, int sendlen,
                       struct islpci_mgmtframe **recvframe)
{
        islpci_private *priv = netdev_priv(ndev);
        const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
        long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
        int err;
        DEFINE_WAIT(wait);

        *recvframe = NULL;

        if (mutex_lock_interruptible(&priv->mgmt_lock))
                return -ERESTARTSYS;

        prepare_to_wait(&priv->mgmt_wqueue, &wait, TASK_UNINTERRUPTIBLE);
        err = islpci_mgt_transmit(ndev, operation, oid, senddata, sendlen);
        if (err)
                goto out;

        err = -ETIMEDOUT;
        while (timeout_left > 0) {
                int timeleft;
                struct islpci_mgmtframe *frame;

                timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
                frame = xchg(&priv->mgmt_received, NULL);
                if (frame) {
                        if (frame->header->oid == oid) {
                                *recvframe = frame;
                                err = 0;
                                goto out;
                        } else {
                                printk(KERN_DEBUG
                                       "%s: expecting oid 0x%x, received 0x%x.\n",
                                       ndev->name, (unsigned int) oid,
                                       frame->header->oid);
                                kfree(frame);
                                frame = NULL;
                        }
                }
                if (timeleft == 0) {
                        printk(KERN_DEBUG
                               "%s: timeout waiting for mgmt response %lu, "
                               "triggering device\n",
                               ndev->name, timeout_left);
                        islpci_trigger(priv);
                }
                timeout_left += timeleft - wait_cycle_jiffies;
        }
        printk(KERN_WARNING "%s: timeout waiting for mgmt response\n",
               ndev->name);

        /* TODO: we should reset the device here */
      out:
        finish_wait(&priv->mgmt_wqueue, &wait);
        mutex_unlock(&priv->mgmt_lock);
        return err;
}
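/*
 * Example use of islpci_mgt_transaction() (an illustrative sketch only;
 * PIMFOR_OP_SET comes from islpci_mgt.h, but OID_EXAMPLE and the payload
 * value below are placeholders for whatever isl_oid.h defines for the MIB
 * object being set):
 *
 *      struct islpci_mgmtframe *res = NULL;
 *      __le32 val = cpu_to_le32(1);    // hypothetical payload value
 *      int err;
 *
 *      err = islpci_mgt_transaction(ndev, PIMFOR_OP_SET, OID_EXAMPLE,
 *                                   &val, sizeof(val), &res);
 *      if (!err) {
 *              // inspect res->header and res->data, then release the frame
 *              kfree(res);
 *      }
 *
 * The call sleeps on priv->mgmt_wqueue, so it must not be made from
 * interrupt context.
 */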