Loading...
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018-2022 Marvell International Ltd. * * Support library for the hardware Packet Output unit. 
*/ #include <errno.h> #include <log.h> #include <time.h> #include <linux/delay.h> #include <mach/cvmx-regs.h> #include <mach/cvmx-csr.h> #include <mach/cvmx-bootmem.h> #include <mach/octeon-model.h> #include <mach/cvmx-fuse.h> #include <mach/octeon-feature.h> #include <mach/cvmx-qlm.h> #include <mach/octeon_qlm.h> #include <mach/cvmx-pcie.h> #include <mach/cvmx-coremask.h> #include <mach/cvmx-agl-defs.h> #include <mach/cvmx-bgxx-defs.h> #include <mach/cvmx-ciu-defs.h> #include <mach/cvmx-gmxx-defs.h> #include <mach/cvmx-gserx-defs.h> #include <mach/cvmx-ilk-defs.h> #include <mach/cvmx-iob-defs.h> #include <mach/cvmx-ipd-defs.h> #include <mach/cvmx-pcsx-defs.h> #include <mach/cvmx-pcsxx-defs.h> #include <mach/cvmx-pki-defs.h> #include <mach/cvmx-pko-defs.h> #include <mach/cvmx-xcv-defs.h> #include <mach/cvmx-hwpko.h> #include <mach/cvmx-ilk.h> #include <mach/cvmx-pki.h> #include <mach/cvmx-pko3.h> #include <mach/cvmx-pko3-queue.h> #include <mach/cvmx-pko3-resources.h> #include <mach/cvmx-helper.h> #include <mach/cvmx-helper-board.h> #include <mach/cvmx-helper-cfg.h> #include <mach/cvmx-helper-bgx.h> #include <mach/cvmx-helper-cfg.h> #include <mach/cvmx-helper-util.h> #include <mach/cvmx-helper-pki.h> #include <mach/cvmx-helper-pko.h> DECLARE_GLOBAL_DATA_PTR; #define CVMX_PKO_NQ_PER_PORT_MAX 32 static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port, int base_queue, int num_queues, const u8 priority[]); static const int debug; /** * Internal state of packet output */ /* * PKO port iterator * XXX this macro only works for 68XX */ #define pko_for_each_port(__p) \ for (__p = 0; __p < CVMX_HELPER_CFG_MAX_PKO_PORT; __p++) \ if (__cvmx_helper_cfg_pko_queue_base(__p) != \ CVMX_HELPER_CFG_INVALID_VALUE) /* * @INTERNAL * * Get INT for a port * * @param interface * @param index * @return the INT value on success and -1 on error * * This function is only for CN68XX. 
*/ static int __cvmx_pko_int(int interface, int index) { cvmx_helper_cfg_assert(interface < CVMX_HELPER_MAX_IFACE); cvmx_helper_cfg_assert(index >= 0); switch (interface) { case 0: cvmx_helper_cfg_assert(index < 4); return index; case 1: cvmx_helper_cfg_assert(index == 0); return 4; case 2: cvmx_helper_cfg_assert(index < 4); return index + 8; case 3: cvmx_helper_cfg_assert(index < 4); return index + 0xC; case 4: cvmx_helper_cfg_assert(index < 4); return index + 0x10; case 5: cvmx_helper_cfg_assert(index < 256); return 0x1C; case 6: cvmx_helper_cfg_assert(index < 256); return 0x1D; case 7: cvmx_helper_cfg_assert(index < 32); return 0x1E; case 8: cvmx_helper_cfg_assert(index < 8); return 0x1F; } return -1; } int cvmx_pko_get_base_pko_port(int interface, int index) { if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) return cvmx_helper_get_ipd_port(interface, index); else if (octeon_has_feature(OCTEON_FEATURE_PKND)) return __cvmx_helper_cfg_pko_port_base(interface, index); else return cvmx_helper_get_ipd_port(interface, index); } int cvmx_pko_get_base_queue(int port) { if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) { return cvmx_pko3_get_queue_base(port); } else if (octeon_has_feature(OCTEON_FEATURE_PKND)) { return __cvmx_helper_cfg_pko_queue_base( cvmx_helper_cfg_ipd2pko_port_base(port)); } else { if (port < 48) return cvmx_pko_queue_table[port].ccppp_queue_base; else return CVMX_PKO_ILLEGAL_QUEUE; } } int cvmx_pko_get_num_queues(int port) { if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) { return cvmx_pko3_get_queue_num(port); } else if (octeon_has_feature(OCTEON_FEATURE_PKND)) { return __cvmx_helper_cfg_pko_queue_num( cvmx_helper_cfg_ipd2pko_port_base(port)); } else { if (port < 48) return cvmx_pko_queue_table[port].ccppp_num_queues; } return 0; } /* * Allocate memory for PKO engines. * * @param engine is the PKO engine ID. * @return # of 2KB-chunks allocated to this PKO engine. 
 */
static int __cvmx_pko_memory_per_engine_o68(int engine)
{
	/* CN68XX has 40KB to divide between the engines in 2KB chunks */
	int max_engine;
	int size_per_engine;
	int size;

	max_engine = __cvmx_helper_cfg_pko_max_engine();
	/* 40KB total / 2KB chunk granularity, split evenly (integer division) */
	size_per_engine = 40 / 2 / max_engine;

	if (engine >= max_engine)
		/* Unused engines get no space */
		size = 0;
	else if (engine == max_engine - 1)
		/*
		 * The last engine gets all the space lost by rounding. This means
		 * the ILK gets the most space
		 */
		size = 40 / 2 - engine * size_per_engine;
	else
		/* All other engines get the same space */
		size = size_per_engine;

	return size;
}

/*
 * Setup one-to-one mapping between PKO2 iport and eport.
 * @INTERNAL
 *
 * First invalidates every iport by pointing it at a reserved engine ID,
 * then programs PKO_MEM_IPORT_PTRS for each configured PKO port.
 */
static void __cvmx_pko2_chip_init(void)
{
	int i;
	int interface, index, port;
	cvmx_helper_interface_mode_t mode;
	union cvmx_pko_mem_iport_ptrs config;

	/*
	 * Initialize every iport with the invalid eid.
	 */
#define CVMX_O68_PKO2_INVALID_EID 31
	config.u64 = 0;
	config.s.eid = CVMX_O68_PKO2_INVALID_EID;
	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
		config.s.ipid = i;
		csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
	}

	/*
	 * Set up PKO_MEM_IPORT_PTRS
	 */
	pko_for_each_port(port) {
		interface = __cvmx_helper_cfg_pko_port_interface(port);
		index = __cvmx_helper_cfg_pko_port_index(port);
		mode = cvmx_helper_interface_get_mode(interface);

		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
			continue;

		config.s.ipid = port;
		config.s.qos_mask = 0xff;
		config.s.crc = __cvmx_helper_get_has_fcs(interface);
		config.s.min_pkt = __cvmx_helper_get_pko_padding(interface);
		config.s.intr = __cvmx_pko_int(interface, index);
		config.s.eid = __cvmx_helper_cfg_pko_port_eid(port);
		/* Loopback ports have no pipes; use the index instead */
		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
			index : port;
		csr_wr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
	}
}

/**
 * Get the PKO pipe for an interface/index pair, or -1 if the
 * interface (loopback) has no pipes.
 */
int __cvmx_pko_get_pipe(int interface, int index)
{
	/* The loopback ports do not have pipes */
	if (cvmx_helper_interface_get_mode(interface) ==
	    CVMX_HELPER_INTERFACE_MODE_LOOP)
		return -1;
	/* We use pko_port as the pipe. See __cvmx_pko_port_map_o68(). */
	return cvmx_helper_get_pko_port(interface, index);
}

/*
 * PKO1 (pre-CN68XX) chip init: park every output queue on the
 * illegal port ID so nothing transmits until explicitly configured.
 */
static void __cvmx_pko1_chip_init(void)
{
	int queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	const int port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;

	/* Initialize all queues to connect to port 63 (ILLEGAL_PID) */
	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
		config1.u64 = 0;
		config1.s.idx3 = 0;
		config1.s.qid7 = queue >> 7;

		config.u64 = 0;
		config.s.tail = 1;
		config.s.index = 0;
		config.s.port = port;
		config.s.queue = queue;
		config.s.buf_ptr = 0;

		/* PTRS1 supplies the high qid bits and must be written first */
		csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}
}

/**
 * Call before any other calls to initialize the packet
 * output system. This does chip global config, and should only be
 * done by one core.
 *
 * @param pool    FPA pool the PKO command buffers are drawn from
 * @param bufsize size in bytes of one command buffer
 */
void cvmx_pko_hw_init(u8 pool, unsigned int bufsize)
{
	union cvmx_pko_reg_cmd_buf config;
	union cvmx_iob_fau_timeout fau_to;
	int i;

	if (debug)
		debug("%s: pool=%u bufsz=%u\n", __func__, pool, bufsize);

	/* chip-specific setup. */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		__cvmx_pko2_chip_init();
	else
		__cvmx_pko1_chip_init();

	/*
	 * Set the size of the PKO command buffers to an odd number of
	 * 64bit words. This allows the normal two word send to stay
	 * aligned and never span a command word buffer.
	 */
	config.u64 = 0;
	config.s.pool = pool;
	config.s.size = bufsize / 8 - 1;
	csr_wr(CVMX_PKO_REG_CMD_BUF, config.u64);

	/*
	 * Disable tagwait FAU timeout. This needs to be done before
	 * anyone might start packet output using tags.
	 */
	fau_to.u64 = 0;
	fau_to.s.tout_val = 0xfff;
	fau_to.s.tout_enb = 0;
	csr_wr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		union cvmx_pko_reg_min_pkt min_pkt;

		/* 59-byte minimum (60 minus one-byte granularity) per size class */
		min_pkt.u64 = 0;
		min_pkt.s.size1 = 59;
		min_pkt.s.size2 = 59;
		min_pkt.s.size3 = 59;
		min_pkt.s.size4 = 59;
		min_pkt.s.size5 = 59;
		min_pkt.s.size6 = 59;
		min_pkt.s.size7 = 59;
		csr_wr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
	}

	/*
	 * If we aren't using all of the queues optimize PKO's
	 * internal memory.
	 */
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		int max_queues = __cvmx_helper_cfg_pko_max_queue();

		/* Smaller queue counts allow deeper per-queue buffering modes */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX) && max_queues <= 32)
			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 3);
		else if (max_queues <= 64)
			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 2);
		else if (max_queues <= 128)
			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 1);
		else
			csr_wr(CVMX_PKO_REG_QUEUE_MODE, 0);
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			/* Two storage registers cover engines 0-15 and 16-31 */
			for (i = 0; i < 2; i++) {
				union cvmx_pko_reg_engine_storagex
					engine_storage;

#define PKO_ASSIGN_ENGINE_STORAGE(index)                                       \
	engine_storage.s.engine##index =                                       \
		__cvmx_pko_memory_per_engine_o68(16 * i + (index))

				engine_storage.u64 = 0;
				PKO_ASSIGN_ENGINE_STORAGE(0);
				PKO_ASSIGN_ENGINE_STORAGE(1);
				PKO_ASSIGN_ENGINE_STORAGE(2);
				PKO_ASSIGN_ENGINE_STORAGE(3);
				PKO_ASSIGN_ENGINE_STORAGE(4);
				PKO_ASSIGN_ENGINE_STORAGE(5);
				PKO_ASSIGN_ENGINE_STORAGE(6);
				PKO_ASSIGN_ENGINE_STORAGE(7);
				PKO_ASSIGN_ENGINE_STORAGE(8);
				PKO_ASSIGN_ENGINE_STORAGE(9);
				PKO_ASSIGN_ENGINE_STORAGE(10);
				PKO_ASSIGN_ENGINE_STORAGE(11);
				PKO_ASSIGN_ENGINE_STORAGE(12);
				PKO_ASSIGN_ENGINE_STORAGE(13);
				PKO_ASSIGN_ENGINE_STORAGE(14);
				PKO_ASSIGN_ENGINE_STORAGE(15);
				csr_wr(CVMX_PKO_REG_ENGINE_STORAGEX(i),
				       engine_storage.u64);
			}
		}
	}
}

/**
 * Enables the packet output hardware. It must already be
 * configured.
*/ void cvmx_pko_enable(void) { union cvmx_pko_reg_flags flags; flags.u64 = csr_rd(CVMX_PKO_REG_FLAGS); if (flags.s.ena_pko) debug("Warning: Enabling PKO when PKO already enabled.\n"); flags.s.ena_dwb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB); flags.s.ena_pko = 1; /* * always enable big endian for 3-word command. Does nothing * for 2-word. */ flags.s.store_be = 1; csr_wr(CVMX_PKO_REG_FLAGS, flags.u64); } /** * Configure a output port and the associated queues for use. * * @param port Port to configure. * @param base_queue First queue number to associate with this port. * @param num_queues Number of queues to associate with this port * @param priority Array of priority levels for each queue. Values are * allowed to be 0-8. A value of 8 get 8 times the traffic * of a value of 1. A value of 0 indicates that no rounds * will be participated in. These priorities can be changed * on the fly while the pko is enabled. A priority of 9 * indicates that static priority should be used. If static * priority is used all queues with static priority must be * contiguous starting at the base_queue, and lower numbered * queues have higher priority than higher numbered queues. * There must be num_queues elements in the array. 
 */
cvmx_pko_return_value_t cvmx_pko_config_port(int port, int base_queue,
					     int num_queues,
					     const u8 priority[])
{
	cvmx_pko_return_value_t result_code;
	int queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	/* -1 means "no static-priority queues found (yet)" */
	int static_priority_base = -1;
	int static_priority_end = -1;
	int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
	u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();

	/* This function is not used for CN68XX */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return cvmx_pko2_config_port(port, base_queue, num_queues,
					     priority);

	if (debug)
		debug("%s: port=%d queue=%d-%d pri %#x %#x %#x %#x\n",
		      __func__, port, base_queue,
		      (base_queue + num_queues - 1), priority[0], priority[1],
		      priority[2], priority[3]);

	/* The need to handle ILLEGAL_PID port argument
	 * is obsolete now, the code here can be simplified.
	 */

	if (port >= CVMX_PKO_NUM_OUTPUT_PORTS &&
	    port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		debug("ERROR: %s: Invalid port %llu\n", __func__,
		      (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		debug("ERROR: %s: Invalid queue range port = %lld base=%llu numques=%lld\n",
		      __func__, (unsigned long long)port,
		      (unsigned long long)base_queue,
		      (unsigned long long)num_queues);
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			/* priority[] is indexed modulo 16 */
			int p_queue = queue % 16;

			if (static_priority_base == -1 &&
			    priority[p_queue] ==
				    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1 &&
			    static_priority_end == -1 &&
			    priority[p_queue] !=
				    CVMX_PKO_QUEUE_STATIC_PRIORITY &&
			    queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1 &&
				 static_priority_end == -1 &&
				 queue == num_queues - 1)
				/* all queues're static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous. Also catches some
			 * cases of static priorities not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1 &&
			    (int)queue > static_priority_end &&
			    priority[p_queue] ==
				    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
				      __func__, (int)queue,
				      static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
			      __func__, static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
	}

	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue numbers
	 */

	result_code = CVMX_PKO_SUCCESS;

	for (queue = 0; queue < num_queues; queue++) {
		u64 *buf_ptr = NULL;
		int p_queue = queue % 16;

		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		config.s.static_p = static_priority_base >= 0;
		config.s.static_q = (int)queue <= static_priority_end;
		config.s.s_tail = (int)queue == static_priority_end;
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packet don't
		 * get grouped up.
		 */
		switch ((int)priority[p_queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			config.s.qos_mask = 0xff;
			break;
		default:
			debug("ERROR: %s: Invalid priority %llu\n", __func__,
			      (unsigned long long)priority[p_queue]);
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res;

			/* Back the queue with a command buffer from the PKO pool */
			cmd_res = cvmx_cmd_queue_initialize(
				CVMX_CMD_QUEUE_PKO(base_queue + queue),
				CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
				outputbuffer_pool_size -
					CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST *
						8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					debug("ERROR: %s: Unable to allocate output buffer\n",
					      __func__);
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					debug("ERROR: %s: Port already setup. port=%d\n",
					      __func__, (int)port);
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					debug("ERROR: %s: Command queue initialization failed.\n",
					      __func__);
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
				CVMX_CMD_QUEUE_PKO(base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else {
			config.s.buf_ptr = 0;
		}

		/* Ensure the command buffer is visible before the CSR writes */
		CVMX_SYNCWS;

		csr_wr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		csr_wr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}

/*
 * Configure queues for an internal port.
 * @INTERNAL
 * @param pko_port PKO internal port number
 * @note this is the PKO2 equivalent to cvmx_pko_config_port()
 */
static cvmx_pko_return_value_t cvmx_pko2_config_port(short ipd_port,
						     int base_queue,
						     int num_queues,
						     const u8 priority[])
{
	int queue, pko_port;
	int static_priority_base;
	int static_priority_end;
	union cvmx_pko_mem_iqueue_ptrs config;
	u64 *buf_ptr = NULL;
	int outputbuffer_pool = (int)cvmx_fpa_get_pko_pool();
	u64 outputbuffer_pool_size = cvmx_fpa_get_pko_pool_block_size();

	pko_port = cvmx_helper_cfg_ipd2pko_port_base(ipd_port);

	if (debug)
		debug("%s: ipd_port %d pko_iport %d qbase %d qnum %d\n",
		      __func__, ipd_port, pko_port, base_queue, num_queues);

	static_priority_base = -1;
	static_priority_end = -1;

	/*
	 * static queue priority validation
	 *
	 * NOTE(review): unlike cvmx_pko_config_port(), the error cases
	 * below only log and do not return CVMX_PKO_INVALID_PRIORITY,
	 * and the static_priority_base > 0 check sits inside the loop
	 * (so it may log repeatedly) — confirm this is intentional.
	 */
	for (queue = 0; queue < num_queues; queue++) {
		/* priority[] is indexed modulo 16 */
		int p_queue = queue % 16;

		if (static_priority_base == -1 &&
		    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY)
			static_priority_base = queue;

		if (static_priority_base != -1 && static_priority_end == -1 &&
		    priority[p_queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY &&
		    queue)
			static_priority_end = queue - 1;
		else if (static_priority_base != -1 &&
			 static_priority_end == -1 && queue == num_queues - 1)
			static_priority_end =
				queue; /* all queues are static priority */

		/*
		 * Check to make sure all static priority queues are contiguous.
		 * Also catches some cases of static priorities not starting from
		 * queue 0.
		 */
		if (static_priority_end != -1 &&
		    (int)queue > static_priority_end &&
		    priority[p_queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY) {
			debug("ERROR: %s: Static priority queues aren't contiguous or don't start at base queue. q: %d, eq: %d\n",
			      __func__, (int)queue, static_priority_end);
		}
		if (static_priority_base > 0) {
			debug("ERROR: %s: Static priority queues don't start at base queue. sq: %d\n",
			      __func__, static_priority_base);
		}
	}

	/*
	 * main loop to set the fields of CVMX_PKO_MEM_IQUEUE_PTRS for
	 * each queue
	 */
	for (queue = 0; queue < num_queues; queue++) {
		/*
		 * NOTE(review): this loop indexes priority[] modulo 8,
		 * while the validation loop above uses modulo 16 —
		 * verify against the hardware/configuration contract.
		 */
		int p_queue = queue % 8;

		config.u64 = 0;
		config.s.index = queue;
		config.s.qid = base_queue + queue;
		config.s.ipid = pko_port;
		config.s.tail = (queue == (num_queues - 1));
		config.s.s_tail = (queue == static_priority_end);
		config.s.static_p = (static_priority_base >= 0);
		config.s.static_q = (queue <= static_priority_end);

		/*
		 * Convert the priority into an enable bit field.
		 * Try to space the bits out evenly so the packet
		 * don't get grouped up.
		 */
		switch ((int)priority[p_queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			config.s.qos_mask = 0xff;
			break;
		default:
			debug("ERROR: %s: Invalid priority %llu\n", __func__,
			      (unsigned long long)priority[p_queue]);
			config.s.qos_mask = 0xff;
			break;
		}

		/*
		 * The command queues
		 */
		{
			cvmx_cmd_queue_result_t cmd_res;

			cmd_res = cvmx_cmd_queue_initialize(
				CVMX_CMD_QUEUE_PKO(base_queue + queue),
				CVMX_PKO_MAX_QUEUE_DEPTH, outputbuffer_pool,
				(outputbuffer_pool_size -
				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));

			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					debug("ERROR: %s: Unable to allocate output buffer\n",
					      __func__);
					break;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					debug("ERROR: %s: Port already setup\n",
					      __func__);
					break;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					debug("ERROR: %s: Command queue initialization failed.",
					      __func__);
					break;
				}
				debug(" pko_port%d base_queue%d num_queues%d queue%d.\n",
				      pko_port, base_queue, num_queues, queue);
			}

			buf_ptr = (u64 *)cvmx_cmd_queue_buffer(
				CVMX_CMD_QUEUE_PKO(base_queue + queue));
			/* IQUEUE_PTRS takes a 128-byte-aligned physical address */
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
		}

		/* Ensure the command buffer is visible before the CSR write */
		CVMX_SYNCWS;
		csr_wr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
	}

	/* Error detection is desirable here */
	return 0;
}