1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2/* 3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. 4 */ 5 6#include "ena_com.h" 7 8/*****************************************************************************/ 9/*****************************************************************************/ 10 11/* Timeout in micro-sec */ 12#define ADMIN_CMD_TIMEOUT_US (3000000) 13 14#define ENA_ASYNC_QUEUE_DEPTH 16 15#define ENA_ADMIN_QUEUE_DEPTH 32 16 17 18#define ENA_CTRL_MAJOR 0 19#define ENA_CTRL_MINOR 0 20#define ENA_CTRL_SUB_MINOR 1 21 22#define MIN_ENA_CTRL_VER \ 23 (((ENA_CTRL_MAJOR) << \ 24 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \ 25 ((ENA_CTRL_MINOR) << \ 26 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \ 27 (ENA_CTRL_SUB_MINOR)) 28 29#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x))) 30#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32)) 31 32#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 33 34#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4 35 36#define ENA_REGS_ADMIN_INTR_MASK 1 37 38#define ENA_MAX_BACKOFF_DELAY_EXP 16U 39 40#define ENA_MIN_ADMIN_POLL_US 100 41 42#define ENA_MAX_ADMIN_POLL_US 5000 43 44/*****************************************************************************/ 45/*****************************************************************************/ 46/*****************************************************************************/ 47 48enum ena_cmd_status { 49 ENA_CMD_SUBMITTED, 50 ENA_CMD_COMPLETED, 51 /* Abort - canceled by the driver */ 52 ENA_CMD_ABORTED, 53}; 54 55struct ena_comp_ctx { 56 struct completion wait_event; 57 struct ena_admin_acq_entry *user_cqe; 58 u32 comp_size; 59 enum ena_cmd_status status; 60 /* status from the device */ 61 u8 comp_status; 62 u8 cmd_opcode; 63 bool occupied; 64}; 65 66struct ena_com_stats_ctx { 67 struct ena_admin_aq_get_stats_cmd get_cmd; 68 struct ena_admin_acq_get_stats_resp get_resp; 69}; 70 71static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, 72 struct ena_common_mem_addr *ena_addr, 73 dma_addr_t addr) 74{ 75 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { 76 pr_err("DMA address has more bits that the device supports\n"); 77 return -EINVAL; 78 } 79 80 ena_addr->mem_addr_low = lower_32_bits(addr); 81 ena_addr->mem_addr_high = (u16)upper_32_bits(addr); 82 83 return 0; 84} 85 86static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) 87{ 88 struct ena_com_admin_sq *sq = &admin_queue->sq; 89 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 90 91 sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, 92 &sq->dma_addr, GFP_KERNEL); 93 94 if (!sq->entries) { 95 pr_err("Memory allocation failed\n"); 96 return -ENOMEM; 97 } 98 99 sq->head = 0; 100 sq->tail = 0; 101 sq->phase = 1; 102 103 sq->db_addr = NULL; 104 105 return 0; 106} 107 108static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) 109{ 110 struct ena_com_admin_cq *cq = &admin_queue->cq; 111 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 112 113 cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, 114 &cq->dma_addr, GFP_KERNEL); 115 116 if (!cq->entries) { 117 pr_err("Memory allocation failed\n"); 118 return -ENOMEM; 119 } 120 121 cq->head = 0; 122 cq->phase = 1; 123 124 return 0; 125} 126 127static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, 128 struct ena_aenq_handlers *aenq_handlers) 129{ 130 struct ena_com_aenq *aenq = &ena_dev->aenq; 131 u32 addr_low, addr_high, aenq_caps; 132 u16 size; 133 134 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 135 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 136 aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, 137 &aenq->dma_addr, GFP_KERNEL); 138 139 if (!aenq->entries) { 140 pr_err("Memory allocation failed\n"); 141 return -ENOMEM; 142 } 143 144 aenq->head = aenq->q_depth; 145 aenq->phase = 1; 146 147 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr); 148 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr); 149 150 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF); 151 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF); 152 153 aenq_caps = 0; 154 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; 155 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) 156 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & 157 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; 158 writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); 159 160 if (unlikely(!aenq_handlers)) { 161 pr_err("AENQ handlers pointer is NULL\n"); 162 return -EINVAL; 163 } 164 165 aenq->aenq_handlers = aenq_handlers; 166 167 return 0; 168} 169 170static void comp_ctxt_release(struct ena_com_admin_queue *queue, 171 struct ena_comp_ctx *comp_ctx) 172{ 173 comp_ctx->occupied = false; 174 atomic_dec(&queue->outstanding_cmds); 175} 176 177static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue, 178 u16 command_id, bool capture) 179{ 180 if (unlikely(command_id >= admin_queue->q_depth)) { 181 pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n", 182 command_id, admin_queue->q_depth); 183 return NULL; 184 } 185 186 if (unlikely(!admin_queue->comp_ctx)) { 187 pr_err("Completion context is NULL\n"); 188 return NULL; 189 } 190 191 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { 192 pr_err("Completion context is occupied\n"); 193 return NULL; 194 } 195 196 if (capture) { 197 atomic_inc(&admin_queue->outstanding_cmds); 198 admin_queue->comp_ctx[command_id].occupied = true; 199 } 200 201 return &admin_queue->comp_ctx[command_id]; 202} 203 204static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 205 struct ena_admin_aq_entry *cmd, 206 size_t cmd_size_in_bytes, 207 struct ena_admin_acq_entry *comp, 208 size_t comp_size_in_bytes) 209{ 210 struct ena_comp_ctx *comp_ctx; 211 u16 tail_masked, cmd_id; 212 u16 queue_size_mask; 213 u16 cnt; 214 215 queue_size_mask = admin_queue->q_depth - 1; 216 217 tail_masked = admin_queue->sq.tail & queue_size_mask; 218 219 /* In case of queue FULL */ 220 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); 221 if (cnt >= admin_queue->q_depth) { 222 pr_debug("Admin queue is full.\n"); 223 admin_queue->stats.out_of_space++; 224 return ERR_PTR(-ENOSPC); 225 } 226 227 cmd_id = admin_queue->curr_cmd_id; 228 229 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & 230 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK; 231 232 cmd->aq_common_descriptor.command_id |= cmd_id & 233 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK; 234 235 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true); 236 if (unlikely(!comp_ctx)) 237 return ERR_PTR(-EINVAL); 238 239 comp_ctx->status = ENA_CMD_SUBMITTED; 240 comp_ctx->comp_size = (u32)comp_size_in_bytes; 241 comp_ctx->user_cqe = comp; 242 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode; 243 244 reinit_completion(&comp_ctx->wait_event); 245 246 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); 247 248 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) & 249 queue_size_mask; 250 251 admin_queue->sq.tail++; 252 admin_queue->stats.submitted_cmd++; 253 254 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) 255 admin_queue->sq.phase = !admin_queue->sq.phase; 256 257 writel(admin_queue->sq.tail, admin_queue->sq.db_addr); 258 259 return comp_ctx; 260} 261 262static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) 263{ 264 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); 265 struct ena_comp_ctx *comp_ctx; 266 u16 i; 267 268 admin_queue->comp_ctx = 269 devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); 270 if (unlikely(!admin_queue->comp_ctx)) { 271 pr_err("Memory allocation failed\n"); 272 return -ENOMEM; 273 } 274 275 for (i = 0; i < admin_queue->q_depth; i++) { 276 comp_ctx = get_comp_ctxt(admin_queue, i, false); 277 if (comp_ctx) 278 init_completion(&comp_ctx->wait_event); 279 } 280 281 return 0; 282} 283 284static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, 285 struct ena_admin_aq_entry *cmd, 286 size_t cmd_size_in_bytes, 287 struct ena_admin_acq_entry *comp, 288 size_t comp_size_in_bytes) 289{ 290 unsigned long flags = 0; 291 struct ena_comp_ctx *comp_ctx; 292 293 spin_lock_irqsave(&admin_queue->q_lock, flags); 294 if (unlikely(!admin_queue->running_state)) { 295 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 296 return ERR_PTR(-ENODEV); 297 } 298 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd, 299 cmd_size_in_bytes, 300 comp, 301 comp_size_in_bytes); 302 if (IS_ERR(comp_ctx)) 303 admin_queue->running_state = false; 304 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 305 306 return comp_ctx; 307} 308 309static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, 310 struct ena_com_create_io_ctx *ctx, 311 struct ena_com_io_sq *io_sq) 312{ 313 size_t size; 314 int dev_node = 0; 315 316 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 317 318 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits; 319 io_sq->desc_entry_size = 320 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 321 sizeof(struct ena_eth_io_tx_desc) : 322 sizeof(struct ena_eth_io_rx_desc); 323 324 size = io_sq->desc_entry_size * io_sq->q_depth; 325 326 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 327 dev_node = dev_to_node(ena_dev->dmadev); 328 set_dev_node(ena_dev->dmadev, ctx->numa_node); 329 io_sq->desc_addr.virt_addr = 330 dma_alloc_coherent(ena_dev->dmadev, size, 331 &io_sq->desc_addr.phys_addr, 332 GFP_KERNEL); 333 set_dev_node(ena_dev->dmadev, dev_node); 334 if (!io_sq->desc_addr.virt_addr) { 335 io_sq->desc_addr.virt_addr = 336 dma_alloc_coherent(ena_dev->dmadev, size, 337 &io_sq->desc_addr.phys_addr, 338 GFP_KERNEL); 339 } 340 341 if (!io_sq->desc_addr.virt_addr) { 342 pr_err("Memory allocation failed\n"); 343 return -ENOMEM; 344 } 345 } 346 347 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 348 /* Allocate bounce buffers */ 349 io_sq->bounce_buf_ctrl.buffer_size = 350 ena_dev->llq_info.desc_list_entry_size; 351 io_sq->bounce_buf_ctrl.buffers_num = 352 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT; 353 io_sq->bounce_buf_ctrl.next_to_use = 0; 354 355 size = io_sq->bounce_buf_ctrl.buffer_size * 356 io_sq->bounce_buf_ctrl.buffers_num; 357 358 dev_node = dev_to_node(ena_dev->dmadev); 359 set_dev_node(ena_dev->dmadev, ctx->numa_node); 360 io_sq->bounce_buf_ctrl.base_buffer = 361 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 362 set_dev_node(ena_dev->dmadev, dev_node); 363 if (!io_sq->bounce_buf_ctrl.base_buffer) 364 io_sq->bounce_buf_ctrl.base_buffer = 365 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); 366 367 if (!io_sq->bounce_buf_ctrl.base_buffer) { 368 pr_err("Bounce buffer memory allocation failed\n"); 369 return -ENOMEM; 370 } 371 372 memcpy(&io_sq->llq_info, &ena_dev->llq_info, 373 sizeof(io_sq->llq_info)); 374 375 /* Initiate the first bounce buffer */ 376 io_sq->llq_buf_ctrl.curr_bounce_buf = 377 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); 378 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, 379 0x0, io_sq->llq_info.desc_list_entry_size); 380 io_sq->llq_buf_ctrl.descs_left_in_line = 381 io_sq->llq_info.descs_num_before_header; 382 io_sq->disable_meta_caching = 383 io_sq->llq_info.disable_meta_caching; 384 385 if (io_sq->llq_info.max_entries_in_tx_burst > 0) 386 io_sq->entries_in_tx_burst_left = 387 io_sq->llq_info.max_entries_in_tx_burst; 388 } 389 390 io_sq->tail = 0; 391 io_sq->next_to_comp = 0; 392 io_sq->phase = 1; 393 394 return 0; 395} 396 397static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, 398 struct ena_com_create_io_ctx *ctx, 399 struct ena_com_io_cq *io_cq) 400{ 401 size_t size; 402 int prev_node = 0; 403 404 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr)); 405 406 /* Use the basic completion descriptor for Rx */ 407 io_cq->cdesc_entry_size_in_bytes = 408 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 409 sizeof(struct ena_eth_io_tx_cdesc) : 410 sizeof(struct ena_eth_io_rx_cdesc_base); 411 412 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 413 414 prev_node = dev_to_node(ena_dev->dmadev); 415 set_dev_node(ena_dev->dmadev, ctx->numa_node); 416 io_cq->cdesc_addr.virt_addr = 417 dma_alloc_coherent(ena_dev->dmadev, size, 418 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 419 set_dev_node(ena_dev->dmadev, prev_node); 420 if (!io_cq->cdesc_addr.virt_addr) { 421 io_cq->cdesc_addr.virt_addr = 422 dma_alloc_coherent(ena_dev->dmadev, size, 423 &io_cq->cdesc_addr.phys_addr, 424 GFP_KERNEL); 425 } 426 427 if (!io_cq->cdesc_addr.virt_addr) { 428 pr_err("Memory allocation failed\n"); 429 return -ENOMEM; 430 } 431 432 io_cq->phase = 1; 433 io_cq->head = 0; 434 435 return 0; 436} 437 438static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, 439 struct ena_admin_acq_entry *cqe) 440{ 441 struct ena_comp_ctx *comp_ctx; 442 u16 cmd_id; 443 444 cmd_id = cqe->acq_common_descriptor.command & 445 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; 446 447 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); 448 if (unlikely(!comp_ctx)) { 449 pr_err("comp_ctx is NULL. Changing the admin queue running state\n"); 450 admin_queue->running_state = false; 451 return; 452 } 453 454 comp_ctx->status = ENA_CMD_COMPLETED; 455 comp_ctx->comp_status = cqe->acq_common_descriptor.status; 456 457 if (comp_ctx->user_cqe) 458 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); 459 460 if (!admin_queue->polling) 461 complete(&comp_ctx->wait_event); 462} 463 464static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue) 465{ 466 struct ena_admin_acq_entry *cqe = NULL; 467 u16 comp_num = 0; 468 u16 head_masked; 469 u8 phase; 470 471 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); 472 phase = admin_queue->cq.phase; 473 474 cqe = &admin_queue->cq.entries[head_masked]; 475 476 /* Go over all the completions */ 477 while ((READ_ONCE(cqe->acq_common_descriptor.flags) & 478 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 479 /* Do not read the rest of the completion entry before the 480 * phase bit was validated 481 */ 482 dma_rmb(); 483 ena_com_handle_single_admin_completion(admin_queue, cqe); 484 485 head_masked++; 486 comp_num++; 487 if (unlikely(head_masked == admin_queue->q_depth)) { 488 head_masked = 0; 489 phase = !phase; 490 } 491 492 cqe = &admin_queue->cq.entries[head_masked]; 493 } 494 495 admin_queue->cq.head += comp_num; 496 admin_queue->cq.phase = phase; 497 admin_queue->sq.head += comp_num; 498 admin_queue->stats.completed_cmd += comp_num; 499} 500 501static int ena_com_comp_status_to_errno(u8 comp_status) 502{ 503 if (unlikely(comp_status != 0)) 504 pr_err("Admin command failed[%u]\n", comp_status); 505 506 switch (comp_status) { 507 case ENA_ADMIN_SUCCESS: 508 return 0; 509 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: 510 return -ENOMEM; 511 case ENA_ADMIN_UNSUPPORTED_OPCODE: 512 return -EOPNOTSUPP; 513 case ENA_ADMIN_BAD_OPCODE: 514 case ENA_ADMIN_MALFORMED_REQUEST: 515 case ENA_ADMIN_ILLEGAL_PARAMETER: 516 case ENA_ADMIN_UNKNOWN_ERROR: 517 return -EINVAL; 518 case ENA_ADMIN_RESOURCE_BUSY: 519 return -EAGAIN; 520 } 521 522 return -EINVAL; 523} 524 525static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) 526{ 527 exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP); 528 delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us); 529 delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); 530 usleep_range(delay_us, 2 * delay_us); 531} 532 533static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 534 struct ena_com_admin_queue *admin_queue) 535{ 536 unsigned long flags = 0; 537 unsigned long timeout; 538 int ret; 539 u32 exp = 0; 540 541 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); 542 543 while (1) { 544 spin_lock_irqsave(&admin_queue->q_lock, flags); 545 ena_com_handle_admin_completion(admin_queue); 546 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 547 548 if (comp_ctx->status != ENA_CMD_SUBMITTED) 549 break; 550 551 if (time_is_before_jiffies(timeout)) { 552 pr_err("Wait for completion (polling) timeout\n"); 553 /* ENA didn't have any completion */ 554 spin_lock_irqsave(&admin_queue->q_lock, flags); 555 admin_queue->stats.no_completion++; 556 admin_queue->running_state = false; 557 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 558 559 ret = -ETIME; 560 goto err; 561 } 562 563 ena_delay_exponential_backoff_us(exp++, 564 admin_queue->ena_dev->ena_min_poll_delay_us); 565 } 566 567 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { 568 pr_err("Command was aborted\n"); 569 spin_lock_irqsave(&admin_queue->q_lock, flags); 570 admin_queue->stats.aborted_cmd++; 571 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 572 ret = -ENODEV; 573 goto err; 574 } 575 576 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", 577 comp_ctx->status); 578 579 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 580err: 581 comp_ctxt_release(admin_queue, comp_ctx); 582 return ret; 583} 584 585/* 586 * Set the LLQ configurations of the firmware 587 * 588 * The driver provides only the enabled feature values to the device, 589 * which in turn, checks if they are supported. 590 */ 591static int ena_com_set_llq(struct ena_com_dev *ena_dev) 592{ 593 struct ena_com_admin_queue *admin_queue; 594 struct ena_admin_set_feat_cmd cmd; 595 struct ena_admin_set_feat_resp resp; 596 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 597 int ret; 598 599 memset(&cmd, 0x0, sizeof(cmd)); 600 admin_queue = &ena_dev->admin_queue; 601 602 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 603 cmd.feat_common.feature_id = ENA_ADMIN_LLQ; 604 605 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl; 606 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl; 607 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; 608 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; 609 610 cmd.u.llq.accel_mode.u.set.enabled_flags = 611 BIT(ENA_ADMIN_DISABLE_META_CACHING) | 612 BIT(ENA_ADMIN_LIMIT_TX_BURST); 613 614 ret = ena_com_execute_admin_command(admin_queue, 615 (struct ena_admin_aq_entry *)&cmd, 616 sizeof(cmd), 617 (struct ena_admin_acq_entry *)&resp, 618 sizeof(resp)); 619 620 if (unlikely(ret)) 621 pr_err("Failed to set LLQ configurations: %d\n", ret); 622 623 return ret; 624} 625 626static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, 627 struct ena_admin_feature_llq_desc *llq_features, 628 struct ena_llq_configurations *llq_default_cfg) 629{ 630 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 631 struct ena_admin_accel_mode_get llq_accel_mode_get; 632 u16 supported_feat; 633 int rc; 634 635 memset(llq_info, 0, sizeof(*llq_info)); 636 637 supported_feat = llq_features->header_location_ctrl_supported; 638 639 if (likely(supported_feat & llq_default_cfg->llq_header_location)) { 640 llq_info->header_location_ctrl = 641 llq_default_cfg->llq_header_location; 642 } else { 643 pr_err("Invalid header location control, supported: 0x%x\n", 644 supported_feat); 645 return -EINVAL; 646 } 647 648 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) { 649 supported_feat = llq_features->descriptors_stride_ctrl_supported; 650 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) { 651 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl; 652 } else { 653 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) { 654 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; 655 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { 656 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; 657 } else { 658 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", 659 supported_feat); 660 return -EINVAL; 661 } 662 663 pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 664 llq_default_cfg->llq_stride_ctrl, supported_feat, 665 llq_info->desc_stride_ctrl); 666 } 667 } else { 668 llq_info->desc_stride_ctrl = 0; 669 } 670 671 supported_feat = llq_features->entry_size_ctrl_supported; 672 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) { 673 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size; 674 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value; 675 } else { 676 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) { 677 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B; 678 llq_info->desc_list_entry_size = 128; 679 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) { 680 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B; 681 llq_info->desc_list_entry_size = 192; 682 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) { 683 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; 684 llq_info->desc_list_entry_size = 256; 685 } else { 686 pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", 687 supported_feat); 688 return -EINVAL; 689 } 690 691 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 692 llq_default_cfg->llq_ring_entry_size, supported_feat, 693 llq_info->desc_list_entry_size); 694 } 695 if (unlikely(llq_info->desc_list_entry_size & 0x7)) { 696 /* The desc list entry size should be whole multiply of 8 697 * This requirement comes from __iowrite64_copy() 698 */ 699 pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size); 700 return -EINVAL; 701 } 702 703 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) 704 llq_info->descs_per_entry = llq_info->desc_list_entry_size / 705 sizeof(struct ena_eth_io_tx_desc); 706 else 707 llq_info->descs_per_entry = 1; 708 709 supported_feat = llq_features->desc_num_before_header_supported; 710 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) { 711 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header; 712 } else { 713 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) { 714 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; 715 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) { 716 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1; 717 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) { 718 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4; 719 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { 720 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; 721 } else { 722 pr_err("Invalid descs_num_before_header, supported: 0x%x\n", 723 supported_feat); 724 return -EINVAL; 725 } 726 727 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", 728 llq_default_cfg->llq_num_decs_before_header, 729 supported_feat, llq_info->descs_num_before_header); 730 } 731 /* Check for accelerated queue supported */ 732 llq_accel_mode_get = llq_features->accel_mode.u.get; 733 734 llq_info->disable_meta_caching = 735 !!(llq_accel_mode_get.supported_flags & 736 BIT(ENA_ADMIN_DISABLE_META_CACHING)); 737 738 if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) 739 llq_info->max_entries_in_tx_burst = 740 llq_accel_mode_get.max_tx_burst_size / 741 llq_default_cfg->llq_ring_entry_size_value; 742 743 rc = ena_com_set_llq(ena_dev); 744 if (rc) 745 pr_err("Cannot set LLQ configuration: %d\n", rc); 746 747 return rc; 748} 749 750static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx, 751 struct ena_com_admin_queue *admin_queue) 752{ 753 unsigned long flags = 0; 754 int ret; 755 756 wait_for_completion_timeout(&comp_ctx->wait_event, 757 usecs_to_jiffies( 758 admin_queue->completion_timeout)); 759 760 /* In case the command wasn't completed find out the root cause. 761 * There might be 2 kinds of errors 762 * 1) No completion (timeout reached) 763 * 2) There is completion but the device didn't get any msi-x interrupt. 764 */ 765 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) { 766 spin_lock_irqsave(&admin_queue->q_lock, flags); 767 ena_com_handle_admin_completion(admin_queue); 768 admin_queue->stats.no_completion++; 769 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 770 771 if (comp_ctx->status == ENA_CMD_COMPLETED) { 772 pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", 773 comp_ctx->cmd_opcode, 774 admin_queue->auto_polling ? "ON" : "OFF"); 775 /* Check if fallback to polling is enabled */ 776 if (admin_queue->auto_polling) 777 admin_queue->polling = true; 778 } else { 779 pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n", 780 comp_ctx->cmd_opcode, comp_ctx->status); 781 } 782 /* Check if shifted to polling mode. 783 * This will happen if there is a completion without an interrupt 784 * and autopolling mode is enabled. Continuing normal execution in such case 785 */ 786 if (!admin_queue->polling) { 787 admin_queue->running_state = false; 788 ret = -ETIME; 789 goto err; 790 } 791 } 792 793 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); 794err: 795 comp_ctxt_release(admin_queue, comp_ctx); 796 return ret; 797} 798 799/* This method read the hardware device register through posting writes 800 * and waiting for response 801 * On timeout the function will return ENA_MMIO_READ_TIMEOUT 802 */ 803static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) 804{ 805 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 806 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = 807 mmio_read->read_resp; 808 u32 mmio_read_reg, ret, i; 809 unsigned long flags = 0; 810 u32 timeout = mmio_read->reg_read_to; 811 812 might_sleep(); 813 814 if (timeout == 0) 815 timeout = ENA_REG_READ_TIMEOUT; 816 817 /* If readless is disabled, perform regular read */ 818 if (!mmio_read->readless_supported) 819 return readl(ena_dev->reg_bar + offset); 820 821 spin_lock_irqsave(&mmio_read->lock, flags); 822 mmio_read->seq_num++; 823 824 read_resp->req_id = mmio_read->seq_num + 0xDEAD; 825 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) & 826 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK; 827 mmio_read_reg |= mmio_read->seq_num & 828 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 829 830 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); 831 832 for (i = 0; i < timeout; i++) { 833 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num) 834 break; 835 836 udelay(1); 837 } 838 839 if (unlikely(i == timeout)) { 840 pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", 841 mmio_read->seq_num, offset, read_resp->req_id, 842 read_resp->reg_off); 843 ret = ENA_MMIO_READ_TIMEOUT; 844 goto err; 845 } 846 847 if (read_resp->reg_off != offset) { 848 pr_err("Read failure: wrong offset provided\n"); 849 ret = ENA_MMIO_READ_TIMEOUT; 850 } else { 851 ret = read_resp->reg_val; 852 } 853err: 854 spin_unlock_irqrestore(&mmio_read->lock, flags); 855 856 return ret; 857} 858 859/* There are two types to wait for completion. 860 * Polling mode - wait until the completion is available. 861 * Async mode - wait on wait queue until the completion is ready 862 * (or the timeout expired). 863 * It is expected that the IRQ called ena_com_handle_admin_completion 864 * to mark the completions. 865 */ 866static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx, 867 struct ena_com_admin_queue *admin_queue) 868{ 869 if (admin_queue->polling) 870 return ena_com_wait_and_process_admin_cq_polling(comp_ctx, 871 admin_queue); 872 873 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx, 874 admin_queue); 875} 876 877static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, 878 struct ena_com_io_sq *io_sq) 879{ 880 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 881 struct ena_admin_aq_destroy_sq_cmd destroy_cmd; 882 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp; 883 u8 direction; 884 int ret; 885 886 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 887 888 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 889 direction = ENA_ADMIN_SQ_DIRECTION_TX; 890 else 891 direction = ENA_ADMIN_SQ_DIRECTION_RX; 892 893 destroy_cmd.sq.sq_identity |= (direction << 894 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & 895 ENA_ADMIN_SQ_SQ_DIRECTION_MASK; 896 897 destroy_cmd.sq.sq_idx = io_sq->idx; 898 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ; 899 900 ret = ena_com_execute_admin_command(admin_queue, 901 (struct ena_admin_aq_entry *)&destroy_cmd, 902 sizeof(destroy_cmd), 903 (struct ena_admin_acq_entry *)&destroy_resp, 904 sizeof(destroy_resp)); 905 906 if (unlikely(ret && (ret != -ENODEV))) 907 pr_err("Failed to destroy io sq error: %d\n", ret); 908 909 return ret; 910} 911 912static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, 913 struct ena_com_io_sq *io_sq, 914 struct ena_com_io_cq *io_cq) 915{ 916 size_t size; 917 918 if (io_cq->cdesc_addr.virt_addr) { 919 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; 920 921 dma_free_coherent(ena_dev->dmadev, size, 922 io_cq->cdesc_addr.virt_addr, 923 io_cq->cdesc_addr.phys_addr); 924 925 io_cq->cdesc_addr.virt_addr = NULL; 926 } 927 928 if (io_sq->desc_addr.virt_addr) { 929 size = io_sq->desc_entry_size * io_sq->q_depth; 930 931 dma_free_coherent(ena_dev->dmadev, size, 932 io_sq->desc_addr.virt_addr, 933 io_sq->desc_addr.phys_addr); 934 935 io_sq->desc_addr.virt_addr = NULL; 936 } 937 938 if (io_sq->bounce_buf_ctrl.base_buffer) { 939 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer); 940 io_sq->bounce_buf_ctrl.base_buffer = NULL; 941 } 942} 943 944static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, 945 u16 exp_state) 946{ 947 u32 val, exp = 0; 948 unsigned long timeout_stamp; 949 950 /* Convert timeout from resolution of 100ms to us resolution. */ 951 timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout); 952 953 while (1) { 954 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 955 956 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { 957 pr_err("Reg read timeout occurred\n"); 958 return -ETIME; 959 } 960 961 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) == 962 exp_state) 963 return 0; 964 965 if (time_is_before_jiffies(timeout_stamp)) 966 return -ETIME; 967 968 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); 969 } 970} 971 972static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, 973 enum ena_admin_aq_feature_id feature_id) 974{ 975 u32 feature_mask = 1 << feature_id; 976 977 /* Device attributes is always supported */ 978 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) && 979 !(ena_dev->supported_features & feature_mask)) 980 return false; 981 982 return true; 983} 984 985static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, 986 struct ena_admin_get_feat_resp *get_resp, 987 enum ena_admin_aq_feature_id feature_id, 988 dma_addr_t control_buf_dma_addr, 989 u32 control_buff_size, 990 u8 feature_ver) 991{ 992 struct ena_com_admin_queue *admin_queue; 993 struct ena_admin_get_feat_cmd get_cmd; 994 int ret; 995 996 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { 997 pr_debug("Feature %d isn't supported\n", feature_id); 998 return -EOPNOTSUPP; 999 } 1000 1001 memset(&get_cmd, 0x0, sizeof(get_cmd)); 1002 admin_queue = &ena_dev->admin_queue; 1003 1004 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE; 1005 1006 if (control_buff_size) 1007 get_cmd.aq_common_descriptor.flags = 1008 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 1009 else 1010 get_cmd.aq_common_descriptor.flags = 0; 1011 1012 ret = ena_com_mem_addr_set(ena_dev, 1013 &get_cmd.control_buffer.address, 1014 control_buf_dma_addr); 1015 if (unlikely(ret)) { 1016 pr_err("Memory address set failed\n"); 1017 return ret; 1018 } 1019 1020 get_cmd.control_buffer.length = control_buff_size; 1021 get_cmd.feat_common.feature_version = feature_ver; 1022 get_cmd.feat_common.feature_id = feature_id; 1023 1024 ret = ena_com_execute_admin_command(admin_queue, 1025 (struct ena_admin_aq_entry *) 1026 &get_cmd, 1027 sizeof(get_cmd), 1028 (struct ena_admin_acq_entry *) 1029 get_resp, 1030 sizeof(*get_resp)); 1031 1032 if (unlikely(ret)) 1033 pr_err("Failed to submit get_feature command %d error: %d\n", 1034 feature_id, ret); 1035 1036 return ret; 1037} 1038 1039static int ena_com_get_feature(struct ena_com_dev *ena_dev, 1040 struct ena_admin_get_feat_resp *get_resp, 1041 enum ena_admin_aq_feature_id feature_id, 1042 u8 feature_ver) 1043{ 1044 return ena_com_get_feature_ex(ena_dev, 1045 get_resp, 1046 feature_id, 1047 0, 1048 0, 1049 feature_ver); 1050} 1051 1052int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev) 1053{ 1054 return ena_dev->rss.hash_func; 1055} 1056 1057static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) 1058{ 1059 struct ena_admin_feature_rss_flow_hash_control *hash_key = 1060 (ena_dev->rss).hash_key; 1061 1062 netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key)); 1063 /* The key buffer is stored in the device in an array of 1064 * uint32 elements. 1065 */ 1066 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS; 1067} 1068 1069static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) 1070{ 1071 struct ena_rss *rss = &ena_dev->rss; 1072 1073 if (!ena_com_check_supported_feature_id(ena_dev, 1074 ENA_ADMIN_RSS_HASH_FUNCTION)) 1075 return -EOPNOTSUPP; 1076 1077 rss->hash_key = 1078 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1079 &rss->hash_key_dma_addr, GFP_KERNEL); 1080 1081 if (unlikely(!rss->hash_key)) 1082 return -ENOMEM; 1083 1084 return 0; 1085} 1086 1087static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) 1088{ 1089 struct ena_rss *rss = &ena_dev->rss; 1090 1091 if (rss->hash_key) 1092 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1093 rss->hash_key, rss->hash_key_dma_addr); 1094 rss->hash_key = NULL; 1095} 1096 1097static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) 1098{ 1099 struct ena_rss *rss = &ena_dev->rss; 1100 1101 rss->hash_ctrl = 1102 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1103 &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1104 1105 if (unlikely(!rss->hash_ctrl)) 1106 return -ENOMEM; 1107 1108 return 0; 1109} 1110 1111static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) 1112{ 1113 struct ena_rss *rss = &ena_dev->rss; 1114 1115 if (rss->hash_ctrl) 1116 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1117 rss->hash_ctrl, rss->hash_ctrl_dma_addr); 1118 rss->hash_ctrl = NULL; 1119} 1120 1121static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, 1122 u16 log_size) 1123{ 1124 struct ena_rss *rss = &ena_dev->rss; 1125 struct ena_admin_get_feat_resp get_resp; 1126 size_t tbl_size; 1127 int ret; 1128 1129 ret = ena_com_get_feature(ena_dev, &get_resp, 1130 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0); 1131 if (unlikely(ret)) 1132 return ret; 1133 1134 if ((get_resp.u.ind_table.min_size > log_size) || 1135 (get_resp.u.ind_table.max_size < log_size)) { 1136 pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", 1137 1 << log_size, 1 << get_resp.u.ind_table.min_size, 1138 1 << get_resp.u.ind_table.max_size); 1139 return -EINVAL; 1140 } 1141 1142 tbl_size = (1ULL << log_size) * 1143 sizeof(struct ena_admin_rss_ind_table_entry); 1144 1145 rss->rss_ind_tbl = 1146 dma_alloc_coherent(ena_dev->dmadev, tbl_size, 1147 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1148 if (unlikely(!rss->rss_ind_tbl)) 1149 goto mem_err1; 1150 1151 tbl_size = (1ULL << log_size) * sizeof(u16); 1152 rss->host_rss_ind_tbl = 1153 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); 1154 if (unlikely(!rss->host_rss_ind_tbl)) 1155 goto mem_err2; 1156 1157 rss->tbl_log_size = log_size; 1158 1159 return 0; 1160 1161mem_err2: 1162 tbl_size = (1ULL << log_size) * 1163 sizeof(struct ena_admin_rss_ind_table_entry); 1164 1165 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, 1166 rss->rss_ind_tbl_dma_addr); 1167 rss->rss_ind_tbl = NULL; 1168mem_err1: 1169 rss->tbl_log_size = 0; 1170 return -ENOMEM; 1171} 1172 1173static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) 1174{ 1175 struct ena_rss *rss = &ena_dev->rss; 1176 size_t tbl_size = (1ULL << rss->tbl_log_size) * 1177 sizeof(struct ena_admin_rss_ind_table_entry); 1178 1179 if (rss->rss_ind_tbl) 1180 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, 1181 rss->rss_ind_tbl_dma_addr); 1182 rss->rss_ind_tbl = NULL; 1183 1184 if (rss->host_rss_ind_tbl) 1185 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl); 1186 rss->host_rss_ind_tbl = NULL; 1187} 1188 1189static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, 1190 struct ena_com_io_sq *io_sq, u16 cq_idx) 1191{ 1192 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1193 struct ena_admin_aq_create_sq_cmd create_cmd; 1194 struct ena_admin_acq_create_sq_resp_desc cmd_completion; 1195 u8 direction; 1196 int ret; 1197 1198 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1199 1200 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; 1201 1202 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1203 direction = ENA_ADMIN_SQ_DIRECTION_TX; 1204 else 1205 direction = ENA_ADMIN_SQ_DIRECTION_RX; 1206 1207 create_cmd.sq_identity |= (direction << 1208 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & 1209 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK; 1210 1211 create_cmd.sq_caps_2 |= io_sq->mem_queue_type & 1212 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK; 1213 1214 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC << 1215 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & 1216 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK; 1217 1218 create_cmd.sq_caps_3 |= 1219 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK; 1220 1221 create_cmd.cq_idx = cq_idx; 1222 create_cmd.sq_depth = io_sq->q_depth; 1223 1224 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { 1225 ret = ena_com_mem_addr_set(ena_dev, 1226 &create_cmd.sq_ba, 1227 io_sq->desc_addr.phys_addr); 1228 if (unlikely(ret)) { 1229 pr_err("Memory address set failed\n"); 1230 return ret; 1231 } 1232 } 1233 1234 ret = ena_com_execute_admin_command(admin_queue, 1235 (struct ena_admin_aq_entry *)&create_cmd, 1236 sizeof(create_cmd), 1237 (struct ena_admin_acq_entry *)&cmd_completion, 1238 sizeof(cmd_completion)); 1239 if (unlikely(ret)) { 1240 pr_err("Failed to create IO SQ. error: %d\n", ret); 1241 return ret; 1242 } 1243 1244 io_sq->idx = cmd_completion.sq_idx; 1245 1246 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1247 (uintptr_t)cmd_completion.sq_doorbell_offset); 1248 1249 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 1250 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar 1251 + cmd_completion.llq_headers_offset); 1252 1253 io_sq->desc_addr.pbuf_dev_addr = 1254 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + 1255 cmd_completion.llq_descriptors_offset); 1256 } 1257 1258 pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); 1259 1260 return ret; 1261} 1262 1263static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) 1264{ 1265 struct ena_rss *rss = &ena_dev->rss; 1266 struct ena_com_io_sq *io_sq; 1267 u16 qid; 1268 int i; 1269 1270 for (i = 0; i < 1 << rss->tbl_log_size; i++) { 1271 qid = rss->host_rss_ind_tbl[i]; 1272 if (qid >= ENA_TOTAL_NUM_QUEUES) 1273 return -EINVAL; 1274 1275 io_sq = &ena_dev->io_sq_queues[qid]; 1276 1277 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX) 1278 return -EINVAL; 1279 1280 rss->rss_ind_tbl[i].cq_idx = io_sq->idx; 1281 } 1282 1283 return 0; 1284} 1285 1286static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, 1287 u16 intr_delay_resolution) 1288{ 1289 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; 1290 1291 if (unlikely(!intr_delay_resolution)) { 1292 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); 1293 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; 1294 } 1295 1296 /* update Rx */ 1297 ena_dev->intr_moder_rx_interval = 1298 ena_dev->intr_moder_rx_interval * 1299 prev_intr_delay_resolution / 1300 intr_delay_resolution; 1301 1302 /* update Tx */ 1303 ena_dev->intr_moder_tx_interval = 1304 ena_dev->intr_moder_tx_interval * 1305 prev_intr_delay_resolution / 1306 intr_delay_resolution; 1307 1308 ena_dev->intr_delay_resolution = intr_delay_resolution; 1309} 1310 1311/*****************************************************************************/ 1312/******************************* API ******************************/ 1313/*****************************************************************************/ 1314 1315int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, 1316 struct ena_admin_aq_entry *cmd, 1317 size_t cmd_size, 1318 struct ena_admin_acq_entry *comp, 1319 size_t comp_size) 1320{ 1321 struct ena_comp_ctx *comp_ctx; 1322 int ret; 1323 1324 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, 1325 comp, comp_size); 1326 if (IS_ERR(comp_ctx)) { 1327 if (comp_ctx == ERR_PTR(-ENODEV)) 1328 pr_debug("Failed to submit command [%ld]\n", 1329 PTR_ERR(comp_ctx)); 1330 else 1331 pr_err("Failed to submit command [%ld]\n", 1332 PTR_ERR(comp_ctx)); 1333 1334 return PTR_ERR(comp_ctx); 1335 } 1336 1337 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); 1338 if (unlikely(ret)) { 1339 if (admin_queue->running_state) 1340 pr_err("Failed to process command. ret = %d\n", ret); 1341 else 1342 pr_debug("Failed to process command. ret = %d\n", ret); 1343 } 1344 return ret; 1345} 1346 1347int ena_com_create_io_cq(struct ena_com_dev *ena_dev, 1348 struct ena_com_io_cq *io_cq) 1349{ 1350 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1351 struct ena_admin_aq_create_cq_cmd create_cmd; 1352 struct ena_admin_acq_create_cq_resp_desc cmd_completion; 1353 int ret; 1354 1355 memset(&create_cmd, 0x0, sizeof(create_cmd)); 1356 1357 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; 1358 1359 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) & 1360 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK; 1361 create_cmd.cq_caps_1 |= 1362 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK; 1363 1364 create_cmd.msix_vector = io_cq->msix_vector; 1365 create_cmd.cq_depth = io_cq->q_depth; 1366 1367 ret = ena_com_mem_addr_set(ena_dev, 1368 &create_cmd.cq_ba, 1369 io_cq->cdesc_addr.phys_addr); 1370 if (unlikely(ret)) { 1371 pr_err("Memory address set failed\n"); 1372 return ret; 1373 } 1374 1375 ret = ena_com_execute_admin_command(admin_queue, 1376 (struct ena_admin_aq_entry *)&create_cmd, 1377 sizeof(create_cmd), 1378 (struct ena_admin_acq_entry *)&cmd_completion, 1379 sizeof(cmd_completion)); 1380 if (unlikely(ret)) { 1381 pr_err("Failed to create IO CQ. error: %d\n", ret); 1382 return ret; 1383 } 1384 1385 io_cq->idx = cmd_completion.cq_idx; 1386 1387 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1388 cmd_completion.cq_interrupt_unmask_register_offset); 1389 1390 if (cmd_completion.cq_head_db_register_offset) 1391 io_cq->cq_head_db_reg = 1392 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1393 cmd_completion.cq_head_db_register_offset); 1394 1395 if (cmd_completion.numa_node_register_offset) 1396 io_cq->numa_node_cfg_reg = 1397 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1398 cmd_completion.numa_node_register_offset); 1399 1400 pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); 1401 1402 return ret; 1403} 1404 1405int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, 1406 struct ena_com_io_sq **io_sq, 1407 struct ena_com_io_cq **io_cq) 1408{ 1409 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1410 pr_err("Invalid queue number %d but the max is %d\n", qid, 1411 ENA_TOTAL_NUM_QUEUES); 1412 return -EINVAL; 1413 } 1414 1415 *io_sq = &ena_dev->io_sq_queues[qid]; 1416 *io_cq = &ena_dev->io_cq_queues[qid]; 1417 1418 return 0; 1419} 1420 1421void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) 1422{ 1423 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1424 struct ena_comp_ctx *comp_ctx; 1425 u16 i; 1426 1427 if (!admin_queue->comp_ctx) 1428 return; 1429 1430 for (i = 0; i < admin_queue->q_depth; i++) { 1431 comp_ctx = get_comp_ctxt(admin_queue, i, false); 1432 if (unlikely(!comp_ctx)) 1433 break; 1434 1435 comp_ctx->status = ENA_CMD_ABORTED; 1436 1437 complete(&comp_ctx->wait_event); 1438 } 1439} 1440 1441void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) 1442{ 1443 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1444 unsigned long flags = 0; 1445 u32 exp = 0; 1446 1447 spin_lock_irqsave(&admin_queue->q_lock, flags); 1448 while (atomic_read(&admin_queue->outstanding_cmds) != 0) { 1449 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1450 ena_delay_exponential_backoff_us(exp++, 1451 ena_dev->ena_min_poll_delay_us); 1452 spin_lock_irqsave(&admin_queue->q_lock, flags); 1453 } 1454 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1455} 1456 1457int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, 1458 struct ena_com_io_cq *io_cq) 1459{ 1460 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1461 struct ena_admin_aq_destroy_cq_cmd destroy_cmd; 1462 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; 1463 int ret; 1464 1465 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd)); 1466 1467 destroy_cmd.cq_idx = io_cq->idx; 1468 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; 1469 1470 ret = ena_com_execute_admin_command(admin_queue, 1471 (struct ena_admin_aq_entry *)&destroy_cmd, 1472 sizeof(destroy_cmd), 1473 (struct ena_admin_acq_entry *)&destroy_resp, 1474 sizeof(destroy_resp)); 1475 1476 if (unlikely(ret && (ret != -ENODEV))) 1477 pr_err("Failed to destroy IO CQ. error: %d\n", ret); 1478 1479 return ret; 1480} 1481 1482bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev) 1483{ 1484 return ena_dev->admin_queue.running_state; 1485} 1486 1487void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state) 1488{ 1489 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1490 unsigned long flags = 0; 1491 1492 spin_lock_irqsave(&admin_queue->q_lock, flags); 1493 ena_dev->admin_queue.running_state = state; 1494 spin_unlock_irqrestore(&admin_queue->q_lock, flags); 1495} 1496 1497void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) 1498{ 1499 u16 depth = ena_dev->aenq.q_depth; 1500 1501 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); 1502 1503 /* Init head_db to mark that all entries in the queue 1504 * are initially available 1505 */ 1506 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 1507} 1508 1509int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) 1510{ 1511 struct ena_com_admin_queue *admin_queue; 1512 struct ena_admin_set_feat_cmd cmd; 1513 struct ena_admin_set_feat_resp resp; 1514 struct ena_admin_get_feat_resp get_resp; 1515 int ret; 1516 1517 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); 1518 if (ret) { 1519 pr_info("Can't get aenq configuration\n"); 1520 return ret; 1521 } 1522 1523 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { 1524 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", 1525 get_resp.u.aenq.supported_groups, groups_flag); 1526 return -EOPNOTSUPP; 1527 } 1528 1529 memset(&cmd, 0x0, sizeof(cmd)); 1530 admin_queue = &ena_dev->admin_queue; 1531 1532 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 1533 cmd.aq_common_descriptor.flags = 0; 1534 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG; 1535 cmd.u.aenq.enabled_groups = groups_flag; 1536 1537 ret = ena_com_execute_admin_command(admin_queue, 1538 (struct ena_admin_aq_entry *)&cmd, 1539 sizeof(cmd), 1540 (struct ena_admin_acq_entry *)&resp, 1541 sizeof(resp)); 1542 1543 if (unlikely(ret)) 1544 pr_err("Failed to config AENQ ret: %d\n", ret); 1545 1546 return ret; 1547} 1548 1549int ena_com_get_dma_width(struct ena_com_dev *ena_dev) 1550{ 1551 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 1552 int width; 1553 1554 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { 1555 pr_err("Reg read timeout occurred\n"); 1556 return -ETIME; 1557 } 1558 1559 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> 1560 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; 1561 1562 pr_debug("ENA dma width: %d\n", width); 1563 1564 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { 1565 pr_err("DMA width illegal value: %d\n", width); 1566 return -EINVAL; 1567 } 1568 1569 ena_dev->dma_addr_bits = width; 1570 1571 return width; 1572} 1573 1574int ena_com_validate_version(struct ena_com_dev *ena_dev) 1575{ 1576 u32 ver; 1577 u32 ctrl_ver; 1578 u32 ctrl_ver_masked; 1579 1580 /* Make sure the ENA version and the controller version are at least 1581 * as the driver expects 1582 */ 1583 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF); 1584 ctrl_ver = ena_com_reg_bar_read32(ena_dev, 1585 ENA_REGS_CONTROLLER_VERSION_OFF); 1586 1587 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || 1588 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { 1589 pr_err("Reg read timeout occurred\n"); 1590 return -ETIME; 1591 } 1592 1593 pr_info("ENA device version: %d.%d\n", 1594 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> 1595 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, 1596 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); 1597 1598 pr_info("ENA controller version: %d.%d.%d implementation version %d\n", 1599 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> 1600 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, 1601 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> 1602 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, 1603 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), 1604 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> 1605 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); 1606 1607 ctrl_ver_masked = 1608 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | 1609 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) | 1610 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK); 1611 1612 /* Validate the ctrl version without the implementation ID */ 1613 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { 1614 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); 1615 return -1; 1616 } 1617 1618 return 0; 1619} 1620 1621static void 1622ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev, 1623 struct ena_com_admin_queue *admin_queue) 1624 1625{ 1626 if (!admin_queue->comp_ctx) 1627 return; 1628 1629 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx); 1630 1631 admin_queue->comp_ctx = NULL; 1632} 1633 1634void ena_com_admin_destroy(struct ena_com_dev *ena_dev) 1635{ 1636 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1637 struct ena_com_admin_cq *cq = &admin_queue->cq; 1638 struct ena_com_admin_sq *sq = &admin_queue->sq; 1639 struct ena_com_aenq *aenq = &ena_dev->aenq; 1640 u16 size; 1641 1642 ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue); 1643 1644 size = ADMIN_SQ_SIZE(admin_queue->q_depth); 1645 if (sq->entries) 1646 dma_free_coherent(ena_dev->dmadev, size, sq->entries, 1647 sq->dma_addr); 1648 sq->entries = NULL; 1649 1650 size = ADMIN_CQ_SIZE(admin_queue->q_depth); 1651 if (cq->entries) 1652 dma_free_coherent(ena_dev->dmadev, size, cq->entries, 1653 cq->dma_addr); 1654 cq->entries = NULL; 1655 1656 size = ADMIN_AENQ_SIZE(aenq->q_depth); 1657 if (ena_dev->aenq.entries) 1658 dma_free_coherent(ena_dev->dmadev, size, aenq->entries, 1659 aenq->dma_addr); 1660 aenq->entries = NULL; 1661} 1662 1663void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1664{ 1665 u32 mask_value = 0; 1666 1667 if (polling) 1668 mask_value = ENA_REGS_ADMIN_INTR_MASK; 1669 1670 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); 1671 ena_dev->admin_queue.polling = polling; 1672} 1673 1674void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, 1675 bool polling) 1676{ 1677 ena_dev->admin_queue.auto_polling = polling; 1678} 1679 1680int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) 1681{ 1682 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1683 1684 spin_lock_init(&mmio_read->lock); 1685 mmio_read->read_resp = 1686 dma_alloc_coherent(ena_dev->dmadev, 1687 sizeof(*mmio_read->read_resp), 1688 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1689 if (unlikely(!mmio_read->read_resp)) 1690 goto err; 1691 1692 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 1693 1694 mmio_read->read_resp->req_id = 0x0; 1695 mmio_read->seq_num = 0x0; 1696 mmio_read->readless_supported = true; 1697 1698 return 0; 1699 1700err: 1701 1702 return -ENOMEM; 1703} 1704 1705void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) 1706{ 1707 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1708 1709 mmio_read->readless_supported = readless_supported; 1710} 1711 1712void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) 1713{ 1714 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1715 1716 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1717 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1718 1719 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), 1720 mmio_read->read_resp, mmio_read->read_resp_dma_addr); 1721 1722 mmio_read->read_resp = NULL; 1723} 1724 1725void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev) 1726{ 1727 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; 1728 u32 addr_low, addr_high; 1729 1730 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr); 1731 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr); 1732 1733 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); 1734 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); 1735} 1736 1737int ena_com_admin_init(struct ena_com_dev *ena_dev, 1738 struct ena_aenq_handlers *aenq_handlers) 1739{ 1740 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 1741 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; 1742 int ret; 1743 1744 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 1745 1746 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { 1747 pr_err("Reg read timeout occurred\n"); 1748 return -ETIME; 1749 } 1750 1751 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { 1752 pr_err("Device isn't ready, abort com init\n"); 1753 return -ENODEV; 1754 } 1755 1756 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; 1757 1758 admin_queue->q_dmadev = ena_dev->dmadev; 1759 admin_queue->polling = false; 1760 admin_queue->curr_cmd_id = 0; 1761 1762 atomic_set(&admin_queue->outstanding_cmds, 0); 1763 1764 spin_lock_init(&admin_queue->q_lock); 1765 1766 ret = ena_com_init_comp_ctxt(admin_queue); 1767 if (ret) 1768 goto error; 1769 1770 ret = ena_com_admin_init_sq(admin_queue); 1771 if (ret) 1772 goto error; 1773 1774 ret = ena_com_admin_init_cq(admin_queue); 1775 if (ret) 1776 goto error; 1777 1778 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + 1779 ENA_REGS_AQ_DB_OFF); 1780 1781 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); 1782 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); 1783 1784 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF); 1785 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF); 1786 1787 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); 1788 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); 1789 1790 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF); 1791 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF); 1792 1793 aq_caps = 0; 1794 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK; 1795 aq_caps |= (sizeof(struct ena_admin_aq_entry) << 1796 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) & 1797 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK; 1798 1799 acq_caps = 0; 1800 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK; 1801 acq_caps |= (sizeof(struct ena_admin_acq_entry) << 1802 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) & 1803 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK; 1804 1805 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF); 1806 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF); 1807 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers); 1808 if (ret) 1809 goto error; 1810 1811 admin_queue->ena_dev = ena_dev; 1812 admin_queue->running_state = true; 1813 1814 return 0; 1815error: 1816 ena_com_admin_destroy(ena_dev); 1817 1818 return ret; 1819} 1820 1821int ena_com_create_io_queue(struct ena_com_dev *ena_dev, 1822 struct ena_com_create_io_ctx *ctx) 1823{ 1824 struct ena_com_io_sq *io_sq; 1825 struct ena_com_io_cq *io_cq; 1826 int ret; 1827 1828 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { 1829 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", 1830 ctx->qid, ENA_TOTAL_NUM_QUEUES); 1831 return -EINVAL; 1832 } 1833 1834 io_sq = &ena_dev->io_sq_queues[ctx->qid]; 1835 io_cq = &ena_dev->io_cq_queues[ctx->qid]; 1836 1837 memset(io_sq, 0x0, sizeof(*io_sq)); 1838 memset(io_cq, 0x0, sizeof(*io_cq)); 1839 1840 /* Init CQ */ 1841 io_cq->q_depth = ctx->queue_size; 1842 io_cq->direction = ctx->direction; 1843 io_cq->qid = ctx->qid; 1844 1845 io_cq->msix_vector = ctx->msix_vector; 1846 1847 io_sq->q_depth = ctx->queue_size; 1848 io_sq->direction = ctx->direction; 1849 io_sq->qid = ctx->qid; 1850 1851 io_sq->mem_queue_type = ctx->mem_queue_type; 1852 1853 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) 1854 /* header length is limited to 8 bits */ 1855 io_sq->tx_max_header_size = 1856 min_t(u32, ena_dev->tx_max_header_size, SZ_256); 1857 1858 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); 1859 if (ret) 1860 goto error; 1861 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); 1862 if (ret) 1863 goto error; 1864 1865 ret = ena_com_create_io_cq(ena_dev, io_cq); 1866 if (ret) 1867 goto error; 1868 1869 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx); 1870 if (ret) 1871 goto destroy_io_cq; 1872 1873 return 0; 1874 1875destroy_io_cq: 1876 ena_com_destroy_io_cq(ena_dev, io_cq); 1877error: 1878 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1879 return ret; 1880} 1881 1882void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) 1883{ 1884 struct ena_com_io_sq *io_sq; 1885 struct ena_com_io_cq *io_cq; 1886 1887 if (qid >= ENA_TOTAL_NUM_QUEUES) { 1888 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, 1889 ENA_TOTAL_NUM_QUEUES); 1890 return; 1891 } 1892 1893 io_sq = &ena_dev->io_sq_queues[qid]; 1894 io_cq = &ena_dev->io_cq_queues[qid]; 1895 1896 ena_com_destroy_io_sq(ena_dev, io_sq); 1897 ena_com_destroy_io_cq(ena_dev, io_cq); 1898 1899 ena_com_io_queue_free(ena_dev, io_sq, io_cq); 1900} 1901 1902int ena_com_get_link_params(struct ena_com_dev *ena_dev, 1903 struct ena_admin_get_feat_resp *resp) 1904{ 1905 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); 1906} 1907 1908int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, 1909 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1910{ 1911 struct ena_admin_get_feat_resp get_resp; 1912 int rc; 1913 1914 rc = ena_com_get_feature(ena_dev, &get_resp, 1915 ENA_ADMIN_DEVICE_ATTRIBUTES, 0); 1916 if (rc) 1917 return rc; 1918 1919 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, 1920 sizeof(get_resp.u.dev_attr)); 1921 1922 ena_dev->supported_features = get_resp.u.dev_attr.supported_features; 1923 1924 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { 1925 rc = ena_com_get_feature(ena_dev, &get_resp, 1926 ENA_ADMIN_MAX_QUEUES_EXT, 1927 ENA_FEATURE_MAX_QUEUE_EXT_VER); 1928 if (rc) 1929 return rc; 1930 1931 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) 1932 return -EINVAL; 1933 1934 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, 1935 sizeof(get_resp.u.max_queue_ext)); 1936 ena_dev->tx_max_header_size = 1937 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size; 1938 } else { 1939 rc = ena_com_get_feature(ena_dev, &get_resp, 1940 ENA_ADMIN_MAX_QUEUES_NUM, 0); 1941 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, 1942 sizeof(get_resp.u.max_queue)); 1943 ena_dev->tx_max_header_size = 1944 get_resp.u.max_queue.max_header_size; 1945 1946 if (rc) 1947 return rc; 1948 } 1949 1950 rc = ena_com_get_feature(ena_dev, &get_resp, 1951 ENA_ADMIN_AENQ_CONFIG, 0); 1952 if (rc) 1953 return rc; 1954 1955 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq, 1956 sizeof(get_resp.u.aenq)); 1957 1958 rc = ena_com_get_feature(ena_dev, &get_resp, 1959 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); 1960 if (rc) 1961 return rc; 1962 1963 memcpy(&get_feat_ctx->offload, &get_resp.u.offload, 1964 sizeof(get_resp.u.offload)); 1965 1966 /* Driver hints isn't mandatory admin command. So in case the 1967 * command isn't supported set driver hints to 0 1968 */ 1969 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0); 1970 1971 if (!rc) 1972 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, 1973 sizeof(get_resp.u.hw_hints)); 1974 else if (rc == -EOPNOTSUPP) 1975 memset(&get_feat_ctx->hw_hints, 0x0, 1976 sizeof(get_feat_ctx->hw_hints)); 1977 else 1978 return rc; 1979 1980 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0); 1981 if (!rc) 1982 memcpy(&get_feat_ctx->llq, &get_resp.u.llq, 1983 sizeof(get_resp.u.llq)); 1984 else if (rc == -EOPNOTSUPP) 1985 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); 1986 else 1987 return rc; 1988 1989 return 0; 1990} 1991 1992void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev) 1993{ 1994 ena_com_handle_admin_completion(&ena_dev->admin_queue); 1995} 1996 1997/* ena_handle_specific_aenq_event: 1998 * return the handler that is relevant to the specific event group 1999 */ 2000static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev, 2001 u16 group) 2002{ 2003 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; 2004 2005 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) 2006 return aenq_handlers->handlers[group]; 2007 2008 return aenq_handlers->unimplemented_handler; 2009} 2010 2011/* ena_aenq_intr_handler: 2012 * handles the aenq incoming events. 2013 * pop events from the queue and apply the specific handler 2014 */ 2015void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) 2016{ 2017 struct ena_admin_aenq_entry *aenq_e; 2018 struct ena_admin_aenq_common_desc *aenq_common; 2019 struct ena_com_aenq *aenq = &ena_dev->aenq; 2020 u64 timestamp; 2021 ena_aenq_handler handler_cb; 2022 u16 masked_head, processed = 0; 2023 u8 phase; 2024 2025 masked_head = aenq->head & (aenq->q_depth - 1); 2026 phase = aenq->phase; 2027 aenq_e = &aenq->entries[masked_head]; /* Get first entry */ 2028 aenq_common = &aenq_e->aenq_common_desc; 2029 2030 /* Go over all the events */ 2031 while ((READ_ONCE(aenq_common->flags) & 2032 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { 2033 /* Make sure the phase bit (ownership) is as expected before 2034 * reading the rest of the descriptor. 2035 */ 2036 dma_rmb(); 2037 2038 timestamp = (u64)aenq_common->timestamp_low | 2039 ((u64)aenq_common->timestamp_high << 32); 2040 2041 pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", 2042 aenq_common->group, aenq_common->syndrome, timestamp); 2043 2044 /* Handle specific event*/ 2045 handler_cb = ena_com_get_specific_aenq_cb(ena_dev, 2046 aenq_common->group); 2047 handler_cb(data, aenq_e); /* call the actual event handler*/ 2048 2049 /* Get next event entry */ 2050 masked_head++; 2051 processed++; 2052 2053 if (unlikely(masked_head == aenq->q_depth)) { 2054 masked_head = 0; 2055 phase = !phase; 2056 } 2057 aenq_e = &aenq->entries[masked_head]; 2058 aenq_common = &aenq_e->aenq_common_desc; 2059 } 2060 2061 aenq->head += processed; 2062 aenq->phase = phase; 2063 2064 /* Don't update aenq doorbell if there weren't any processed events */ 2065 if (!processed) 2066 return; 2067 2068 /* write the aenq doorbell after all AENQ descriptors were read */ 2069 mb(); 2070 writel_relaxed((u32)aenq->head, 2071 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); 2072} 2073 2074int ena_com_dev_reset(struct ena_com_dev *ena_dev, 2075 enum ena_regs_reset_reason_types reset_reason) 2076{ 2077 u32 stat, timeout, cap, reset_val; 2078 int rc; 2079 2080 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); 2081 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); 2082 2083 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || 2084 (cap == ENA_MMIO_READ_TIMEOUT))) { 2085 pr_err("Reg read32 timeout occurred\n"); 2086 return -ETIME; 2087 } 2088 2089 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { 2090 pr_err("Device isn't ready, can't reset device\n"); 2091 return -EINVAL; 2092 } 2093 2094 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> 2095 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; 2096 if (timeout == 0) { 2097 pr_err("Invalid timeout value\n"); 2098 return -EINVAL; 2099 } 2100 2101 /* start reset */ 2102 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; 2103 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) & 2104 ENA_REGS_DEV_CTL_RESET_REASON_MASK; 2105 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2106 2107 /* Write again the MMIO read request address */ 2108 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); 2109 2110 rc = wait_for_reset_state(ena_dev, timeout, 2111 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); 2112 if (rc != 0) { 2113 pr_err("Reset indication didn't turn on\n"); 2114 return rc; 2115 } 2116 2117 /* reset done */ 2118 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); 2119 rc = wait_for_reset_state(ena_dev, timeout, 0); 2120 if (rc != 0) { 2121 pr_err("Reset indication didn't turn off\n"); 2122 return rc; 2123 } 2124 2125 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >> 2126 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT; 2127 if (timeout) 2128 /* the resolution of timeout reg is 100ms */ 2129 ena_dev->admin_queue.completion_timeout = timeout * 100000; 2130 else 2131 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US; 2132 2133 return 0; 2134} 2135 2136static int ena_get_dev_stats(struct ena_com_dev *ena_dev, 2137 struct ena_com_stats_ctx *ctx, 2138 enum ena_admin_get_stats_type type) 2139{ 2140 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd; 2141 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp; 2142 struct ena_com_admin_queue *admin_queue; 2143 int ret; 2144 2145 admin_queue = &ena_dev->admin_queue; 2146 2147 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS; 2148 get_cmd->aq_common_descriptor.flags = 0; 2149 get_cmd->type = type; 2150 2151 ret = ena_com_execute_admin_command(admin_queue, 2152 (struct ena_admin_aq_entry *)get_cmd, 2153 sizeof(*get_cmd), 2154 (struct ena_admin_acq_entry *)get_resp, 2155 sizeof(*get_resp)); 2156 2157 if (unlikely(ret)) 2158 pr_err("Failed to get stats. error: %d\n", ret); 2159 2160 return ret; 2161} 2162 2163int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, 2164 struct ena_admin_eni_stats *stats) 2165{ 2166 struct ena_com_stats_ctx ctx; 2167 int ret; 2168 2169 memset(&ctx, 0x0, sizeof(ctx)); 2170 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); 2171 if (likely(ret == 0)) 2172 memcpy(stats, &ctx.get_resp.u.eni_stats, 2173 sizeof(ctx.get_resp.u.eni_stats)); 2174 2175 return ret; 2176} 2177 2178int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, 2179 struct ena_admin_basic_stats *stats) 2180{ 2181 struct ena_com_stats_ctx ctx; 2182 int ret; 2183 2184 memset(&ctx, 0x0, sizeof(ctx)); 2185 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); 2186 if (likely(ret == 0)) 2187 memcpy(stats, &ctx.get_resp.u.basic_stats, 2188 sizeof(ctx.get_resp.u.basic_stats)); 2189 2190 return ret; 2191} 2192 2193int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) 2194{ 2195 struct ena_com_admin_queue *admin_queue; 2196 struct ena_admin_set_feat_cmd cmd; 2197 struct ena_admin_set_feat_resp resp; 2198 int ret; 2199 2200 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { 2201 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); 2202 return -EOPNOTSUPP; 2203 } 2204 2205 memset(&cmd, 0x0, sizeof(cmd)); 2206 admin_queue = &ena_dev->admin_queue; 2207 2208 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2209 cmd.aq_common_descriptor.flags = 0; 2210 cmd.feat_common.feature_id = ENA_ADMIN_MTU; 2211 cmd.u.mtu.mtu = mtu; 2212 2213 ret = ena_com_execute_admin_command(admin_queue, 2214 (struct ena_admin_aq_entry *)&cmd, 2215 sizeof(cmd), 2216 (struct ena_admin_acq_entry *)&resp, 2217 sizeof(resp)); 2218 2219 if (unlikely(ret)) 2220 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret); 2221 2222 return ret; 2223} 2224 2225int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, 2226 struct ena_admin_feature_offload_desc *offload) 2227{ 2228 int ret; 2229 struct ena_admin_get_feat_resp resp; 2230 2231 ret = ena_com_get_feature(ena_dev, &resp, 2232 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); 2233 if (unlikely(ret)) { 2234 pr_err("Failed to get offload capabilities %d\n", ret); 2235 return ret; 2236 } 2237 2238 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload)); 2239 2240 return 0; 2241} 2242 2243int ena_com_set_hash_function(struct ena_com_dev *ena_dev) 2244{ 2245 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2246 struct ena_rss *rss = &ena_dev->rss; 2247 struct ena_admin_set_feat_cmd cmd; 2248 struct ena_admin_set_feat_resp resp; 2249 struct ena_admin_get_feat_resp get_resp; 2250 int ret; 2251 2252 if (!ena_com_check_supported_feature_id(ena_dev, 2253 ENA_ADMIN_RSS_HASH_FUNCTION)) { 2254 pr_debug("Feature %d isn't supported\n", 2255 ENA_ADMIN_RSS_HASH_FUNCTION); 2256 return -EOPNOTSUPP; 2257 } 2258 2259 /* Validate hash function is supported */ 2260 ret = ena_com_get_feature(ena_dev, &get_resp, 2261 ENA_ADMIN_RSS_HASH_FUNCTION, 0); 2262 if (unlikely(ret)) 2263 return ret; 2264 2265 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { 2266 pr_err("Func hash %d isn't supported by device, abort\n", 2267 rss->hash_func); 2268 return -EOPNOTSUPP; 2269 } 2270 2271 memset(&cmd, 0x0, sizeof(cmd)); 2272 2273 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2274 cmd.aq_common_descriptor.flags = 2275 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2276 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION; 2277 cmd.u.flow_hash_func.init_val = rss->hash_init_val; 2278 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func; 2279 2280 ret = ena_com_mem_addr_set(ena_dev, 2281 &cmd.control_buffer.address, 2282 rss->hash_key_dma_addr); 2283 if (unlikely(ret)) { 2284 pr_err("Memory address set failed\n"); 2285 return ret; 2286 } 2287 2288 cmd.control_buffer.length = sizeof(*rss->hash_key); 2289 2290 ret = ena_com_execute_admin_command(admin_queue, 2291 (struct ena_admin_aq_entry *)&cmd, 2292 sizeof(cmd), 2293 (struct ena_admin_acq_entry *)&resp, 2294 sizeof(resp)); 2295 if (unlikely(ret)) { 2296 pr_err("Failed to set hash function %d. error: %d\n", 2297 rss->hash_func, ret); 2298 return -EINVAL; 2299 } 2300 2301 return 0; 2302} 2303 2304int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, 2305 enum ena_admin_hash_functions func, 2306 const u8 *key, u16 key_len, u32 init_val) 2307{ 2308 struct ena_admin_feature_rss_flow_hash_control *hash_key; 2309 struct ena_admin_get_feat_resp get_resp; 2310 enum ena_admin_hash_functions old_func; 2311 struct ena_rss *rss = &ena_dev->rss; 2312 int rc; 2313 2314 hash_key = rss->hash_key; 2315 2316 /* Make sure size is a mult of DWs */ 2317 if (unlikely(key_len & 0x3)) 2318 return -EINVAL; 2319 2320 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2321 ENA_ADMIN_RSS_HASH_FUNCTION, 2322 rss->hash_key_dma_addr, 2323 sizeof(*rss->hash_key), 0); 2324 if (unlikely(rc)) 2325 return rc; 2326 2327 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { 2328 pr_err("Flow hash function %d isn't supported\n", func); 2329 return -EOPNOTSUPP; 2330 } 2331 2332 switch (func) { 2333 case ENA_ADMIN_TOEPLITZ: 2334 if (key) { 2335 if (key_len != sizeof(hash_key->key)) { 2336 pr_err("key len (%hu) doesn't equal the supported size (%zu)\n", 2337 key_len, sizeof(hash_key->key)); 2338 return -EINVAL; 2339 } 2340 memcpy(hash_key->key, key, key_len); 2341 rss->hash_init_val = init_val; 2342 hash_key->key_parts = key_len / sizeof(hash_key->key[0]); 2343 } 2344 break; 2345 case ENA_ADMIN_CRC32: 2346 rss->hash_init_val = init_val; 2347 break; 2348 default: 2349 pr_err("Invalid hash function (%d)\n", func); 2350 return -EINVAL; 2351 } 2352 2353 old_func = rss->hash_func; 2354 rss->hash_func = func; 2355 rc = ena_com_set_hash_function(ena_dev); 2356 2357 /* Restore the old function */ 2358 if (unlikely(rc)) 2359 rss->hash_func = old_func; 2360 2361 return rc; 2362} 2363 2364int ena_com_get_hash_function(struct ena_com_dev *ena_dev, 2365 enum ena_admin_hash_functions *func) 2366{ 2367 struct ena_rss *rss = &ena_dev->rss; 2368 struct ena_admin_get_feat_resp get_resp; 2369 int rc; 2370 2371 if (unlikely(!func)) 2372 return -EINVAL; 2373 2374 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2375 ENA_ADMIN_RSS_HASH_FUNCTION, 2376 rss->hash_key_dma_addr, 2377 sizeof(*rss->hash_key), 0); 2378 if (unlikely(rc)) 2379 return rc; 2380 2381 /* ffs() returns 1 in case the lsb is set */ 2382 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func); 2383 if (rss->hash_func) 2384 rss->hash_func--; 2385 2386 *func = rss->hash_func; 2387 2388 return 0; 2389} 2390 2391int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) 2392{ 2393 struct ena_admin_feature_rss_flow_hash_control *hash_key = 2394 ena_dev->rss.hash_key; 2395 2396 if (key) 2397 memcpy(key, hash_key->key, 2398 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); 2399 2400 return 0; 2401} 2402 2403int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev, 2404 enum ena_admin_flow_hash_proto proto, 2405 u16 *fields) 2406{ 2407 struct ena_rss *rss = &ena_dev->rss; 2408 struct ena_admin_get_feat_resp get_resp; 2409 int rc; 2410 2411 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2412 ENA_ADMIN_RSS_HASH_INPUT, 2413 rss->hash_ctrl_dma_addr, 2414 sizeof(*rss->hash_ctrl), 0); 2415 if (unlikely(rc)) 2416 return rc; 2417 2418 if (fields) 2419 *fields = rss->hash_ctrl->selected_fields[proto].fields; 2420 2421 return 0; 2422} 2423 2424int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) 2425{ 2426 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2427 struct ena_rss *rss = &ena_dev->rss; 2428 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2429 struct ena_admin_set_feat_cmd cmd; 2430 struct ena_admin_set_feat_resp resp; 2431 int ret; 2432 2433 if (!ena_com_check_supported_feature_id(ena_dev, 2434 ENA_ADMIN_RSS_HASH_INPUT)) { 2435 pr_debug("Feature %d isn't supported\n", 2436 ENA_ADMIN_RSS_HASH_INPUT); 2437 return -EOPNOTSUPP; 2438 } 2439 2440 memset(&cmd, 0x0, sizeof(cmd)); 2441 2442 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2443 cmd.aq_common_descriptor.flags = 2444 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2445 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT; 2446 cmd.u.flow_hash_input.enabled_input_sort = 2447 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK | 2448 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; 2449 2450 ret = ena_com_mem_addr_set(ena_dev, 2451 &cmd.control_buffer.address, 2452 rss->hash_ctrl_dma_addr); 2453 if (unlikely(ret)) { 2454 pr_err("Memory address set failed\n"); 2455 return ret; 2456 } 2457 cmd.control_buffer.length = sizeof(*hash_ctrl); 2458 2459 ret = ena_com_execute_admin_command(admin_queue, 2460 (struct ena_admin_aq_entry *)&cmd, 2461 sizeof(cmd), 2462 (struct ena_admin_acq_entry *)&resp, 2463 sizeof(resp)); 2464 if (unlikely(ret)) 2465 pr_err("Failed to set hash input. error: %d\n", ret); 2466 2467 return ret; 2468} 2469 2470int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) 2471{ 2472 struct ena_rss *rss = &ena_dev->rss; 2473 struct ena_admin_feature_rss_hash_control *hash_ctrl = 2474 rss->hash_ctrl; 2475 u16 available_fields = 0; 2476 int rc, i; 2477 2478 /* Get the supported hash input */ 2479 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2480 if (unlikely(rc)) 2481 return rc; 2482 2483 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields = 2484 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2485 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2486 2487 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields = 2488 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2489 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2490 2491 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields = 2492 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2493 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2494 2495 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields = 2496 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA | 2497 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; 2498 2499 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields = 2500 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2501 2502 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields = 2503 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2504 2505 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields = 2506 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA; 2507 2508 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields = 2509 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA; 2510 2511 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) { 2512 available_fields = hash_ctrl->selected_fields[i].fields & 2513 hash_ctrl->supported_fields[i].fields; 2514 if (available_fields != hash_ctrl->selected_fields[i].fields) { 2515 pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", 2516 i, hash_ctrl->supported_fields[i].fields, 2517 hash_ctrl->selected_fields[i].fields); 2518 return -EOPNOTSUPP; 2519 } 2520 } 2521 2522 rc = ena_com_set_hash_ctrl(ena_dev); 2523 2524 /* In case of failure, restore the old hash ctrl */ 2525 if (unlikely(rc)) 2526 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2527 2528 return rc; 2529} 2530 2531int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, 2532 enum ena_admin_flow_hash_proto proto, 2533 u16 hash_fields) 2534{ 2535 struct ena_rss *rss = &ena_dev->rss; 2536 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl; 2537 u16 supported_fields; 2538 int rc; 2539 2540 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { 2541 pr_err("Invalid proto num (%u)\n", proto); 2542 return -EINVAL; 2543 } 2544 2545 /* Get the ctrl table */ 2546 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL); 2547 if (unlikely(rc)) 2548 return rc; 2549 2550 /* Make sure all the fields are supported */ 2551 supported_fields = hash_ctrl->supported_fields[proto].fields; 2552 if ((hash_fields & supported_fields) != hash_fields) { 2553 pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n", 2554 proto, hash_fields, supported_fields); 2555 } 2556 2557 hash_ctrl->selected_fields[proto].fields = hash_fields; 2558 2559 rc = ena_com_set_hash_ctrl(ena_dev); 2560 2561 /* In case of failure, restore the old hash ctrl */ 2562 if (unlikely(rc)) 2563 ena_com_get_hash_ctrl(ena_dev, 0, NULL); 2564 2565 return 0; 2566} 2567 2568int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev, 2569 u16 entry_idx, u16 entry_value) 2570{ 2571 struct ena_rss *rss = &ena_dev->rss; 2572 2573 if (unlikely(entry_idx >= (1 << rss->tbl_log_size))) 2574 return -EINVAL; 2575 2576 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES))) 2577 return -EINVAL; 2578 2579 rss->host_rss_ind_tbl[entry_idx] = entry_value; 2580 2581 return 0; 2582} 2583 2584int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) 2585{ 2586 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; 2587 struct ena_rss *rss = &ena_dev->rss; 2588 struct ena_admin_set_feat_cmd cmd; 2589 struct ena_admin_set_feat_resp resp; 2590 int ret; 2591 2592 if (!ena_com_check_supported_feature_id( 2593 ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { 2594 pr_debug("Feature %d isn't supported\n", 2595 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); 2596 return -EOPNOTSUPP; 2597 } 2598 2599 ret = ena_com_ind_tbl_convert_to_device(ena_dev); 2600 if (ret) { 2601 pr_err("Failed to convert host indirection table to device table\n"); 2602 return ret; 2603 } 2604 2605 memset(&cmd, 0x0, sizeof(cmd)); 2606 2607 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2608 cmd.aq_common_descriptor.flags = 2609 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; 2610 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG; 2611 cmd.u.ind_table.size = rss->tbl_log_size; 2612 cmd.u.ind_table.inline_index = 0xFFFFFFFF; 2613 2614 ret = ena_com_mem_addr_set(ena_dev, 2615 &cmd.control_buffer.address, 2616 rss->rss_ind_tbl_dma_addr); 2617 if (unlikely(ret)) { 2618 pr_err("Memory address set failed\n"); 2619 return ret; 2620 } 2621 2622 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * 2623 sizeof(struct ena_admin_rss_ind_table_entry); 2624 2625 ret = ena_com_execute_admin_command(admin_queue, 2626 (struct ena_admin_aq_entry *)&cmd, 2627 sizeof(cmd), 2628 (struct ena_admin_acq_entry *)&resp, 2629 sizeof(resp)); 2630 2631 if (unlikely(ret)) 2632 pr_err("Failed to set indirect table. error: %d\n", ret); 2633 2634 return ret; 2635} 2636 2637int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) 2638{ 2639 struct ena_rss *rss = &ena_dev->rss; 2640 struct ena_admin_get_feat_resp get_resp; 2641 u32 tbl_size; 2642 int i, rc; 2643 2644 tbl_size = (1ULL << rss->tbl_log_size) * 2645 sizeof(struct ena_admin_rss_ind_table_entry); 2646 2647 rc = ena_com_get_feature_ex(ena_dev, &get_resp, 2648 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 2649 rss->rss_ind_tbl_dma_addr, 2650 tbl_size, 0); 2651 if (unlikely(rc)) 2652 return rc; 2653 2654 if (!ind_tbl) 2655 return 0; 2656 2657 for (i = 0; i < (1 << rss->tbl_log_size); i++) 2658 ind_tbl[i] = rss->host_rss_ind_tbl[i]; 2659 2660 return 0; 2661} 2662 2663int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) 2664{ 2665 int rc; 2666 2667 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2668 2669 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size); 2670 if (unlikely(rc)) 2671 goto err_indr_tbl; 2672 2673 /* The following function might return unsupported in case the 2674 * device doesn't support setting the key / hash function. We can safely 2675 * ignore this error and have indirection table support only. 2676 */ 2677 rc = ena_com_hash_key_allocate(ena_dev); 2678 if (likely(!rc)) 2679 ena_com_hash_key_fill_default_key(ena_dev); 2680 else if (rc != -EOPNOTSUPP) 2681 goto err_hash_key; 2682 2683 rc = ena_com_hash_ctrl_init(ena_dev); 2684 if (unlikely(rc)) 2685 goto err_hash_ctrl; 2686 2687 return 0; 2688 2689err_hash_ctrl: 2690 ena_com_hash_key_destroy(ena_dev); 2691err_hash_key: 2692 ena_com_indirect_table_destroy(ena_dev); 2693err_indr_tbl: 2694 2695 return rc; 2696} 2697 2698void ena_com_rss_destroy(struct ena_com_dev *ena_dev) 2699{ 2700 ena_com_indirect_table_destroy(ena_dev); 2701 ena_com_hash_key_destroy(ena_dev); 2702 ena_com_hash_ctrl_destroy(ena_dev); 2703 2704 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); 2705} 2706 2707int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) 2708{ 2709 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2710 2711 host_attr->host_info = 2712 dma_alloc_coherent(ena_dev->dmadev, SZ_4K, 2713 &host_attr->host_info_dma_addr, GFP_KERNEL); 2714 if (unlikely(!host_attr->host_info)) 2715 return -ENOMEM; 2716 2717 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << 2718 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | 2719 (ENA_COMMON_SPEC_VERSION_MINOR)); 2720 2721 return 0; 2722} 2723 2724int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, 2725 u32 debug_area_size) 2726{ 2727 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2728 2729 host_attr->debug_area_virt_addr = 2730 dma_alloc_coherent(ena_dev->dmadev, debug_area_size, 2731 &host_attr->debug_area_dma_addr, GFP_KERNEL); 2732 if (unlikely(!host_attr->debug_area_virt_addr)) { 2733 host_attr->debug_area_size = 0; 2734 return -ENOMEM; 2735 } 2736 2737 host_attr->debug_area_size = debug_area_size; 2738 2739 return 0; 2740} 2741 2742void ena_com_delete_host_info(struct ena_com_dev *ena_dev) 2743{ 2744 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2745 2746 if (host_attr->host_info) { 2747 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info, 2748 host_attr->host_info_dma_addr); 2749 host_attr->host_info = NULL; 2750 } 2751} 2752 2753void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) 2754{ 2755 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2756 2757 if (host_attr->debug_area_virt_addr) { 2758 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, 2759 host_attr->debug_area_virt_addr, 2760 host_attr->debug_area_dma_addr); 2761 host_attr->debug_area_virt_addr = NULL; 2762 } 2763} 2764 2765int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) 2766{ 2767 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2768 struct ena_com_admin_queue *admin_queue; 2769 struct ena_admin_set_feat_cmd cmd; 2770 struct ena_admin_set_feat_resp resp; 2771 2772 int ret; 2773 2774 /* Host attribute config is called before ena_com_get_dev_attr_feat 2775 * so ena_com can't check if the feature is supported. 2776 */ 2777 2778 memset(&cmd, 0x0, sizeof(cmd)); 2779 admin_queue = &ena_dev->admin_queue; 2780 2781 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; 2782 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG; 2783 2784 ret = ena_com_mem_addr_set(ena_dev, 2785 &cmd.u.host_attr.debug_ba, 2786 host_attr->debug_area_dma_addr); 2787 if (unlikely(ret)) { 2788 pr_err("Memory address set failed\n"); 2789 return ret; 2790 } 2791 2792 ret = ena_com_mem_addr_set(ena_dev, 2793 &cmd.u.host_attr.os_info_ba, 2794 host_attr->host_info_dma_addr); 2795 if (unlikely(ret)) { 2796 pr_err("Memory address set failed\n"); 2797 return ret; 2798 } 2799 2800 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size; 2801 2802 ret = ena_com_execute_admin_command(admin_queue, 2803 (struct ena_admin_aq_entry *)&cmd, 2804 sizeof(cmd), 2805 (struct ena_admin_acq_entry *)&resp, 2806 sizeof(resp)); 2807 2808 if (unlikely(ret)) 2809 pr_err("Failed to set host attributes: %d\n", ret); 2810 2811 return ret; 2812} 2813 2814/* Interrupt moderation */ 2815bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) 2816{ 2817 return ena_com_check_supported_feature_id(ena_dev, 2818 ENA_ADMIN_INTERRUPT_MODERATION); 2819} 2820 2821static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, 2822 u32 intr_delay_resolution, 2823 u32 *intr_moder_interval) 2824{ 2825 if (!intr_delay_resolution) { 2826 pr_err("Illegal interrupt delay granularity value\n"); 2827 return -EFAULT; 2828 } 2829 2830 *intr_moder_interval = coalesce_usecs / intr_delay_resolution; 2831 2832 return 0; 2833} 2834 2835int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, 2836 u32 tx_coalesce_usecs) 2837{ 2838 return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, 2839 ena_dev->intr_delay_resolution, 2840 &ena_dev->intr_moder_tx_interval); 2841} 2842 2843int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, 2844 u32 rx_coalesce_usecs) 2845{ 2846 return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, 2847 ena_dev->intr_delay_resolution, 2848 &ena_dev->intr_moder_rx_interval); 2849} 2850 2851int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) 2852{ 2853 struct ena_admin_get_feat_resp get_resp; 2854 u16 delay_resolution; 2855 int rc; 2856 2857 rc = ena_com_get_feature(ena_dev, &get_resp, 2858 ENA_ADMIN_INTERRUPT_MODERATION, 0); 2859 2860 if (rc) { 2861 if (rc == -EOPNOTSUPP) { 2862 pr_debug("Feature %d isn't supported\n", 2863 ENA_ADMIN_INTERRUPT_MODERATION); 2864 rc = 0; 2865 } else { 2866 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n", 2867 rc); 2868 } 2869 2870 /* no moderation supported, disable adaptive support */ 2871 ena_com_disable_adaptive_moderation(ena_dev); 2872 return rc; 2873 } 2874 2875 /* if moderation is supported by device we set adaptive moderation */ 2876 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; 2877 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); 2878 2879 /* Disable adaptive moderation by default - can be enabled later */ 2880 ena_com_disable_adaptive_moderation(ena_dev); 2881 2882 return 0; 2883} 2884 2885unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) 2886{ 2887 return ena_dev->intr_moder_tx_interval; 2888} 2889 2890unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) 2891{ 2892 return ena_dev->intr_moder_rx_interval; 2893} 2894 2895int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, 2896 struct ena_admin_feature_llq_desc *llq_features, 2897 struct ena_llq_configurations *llq_default_cfg) 2898{ 2899 struct ena_com_llq_info *llq_info = &ena_dev->llq_info; 2900 int rc; 2901 2902 if (!llq_features->max_llq_num) { 2903 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 2904 return 0; 2905 } 2906 2907 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); 2908 if (rc) 2909 return rc; 2910 2911 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - 2912 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); 2913 2914 if (unlikely(ena_dev->tx_max_header_size == 0)) { 2915 pr_err("The size of the LLQ entry is smaller than needed\n"); 2916 return -EINVAL; 2917 } 2918 2919 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; 2920 2921 return 0; 2922} 2923