| 1 |
/*- |
| 2 |
* Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. |
| 3 |
* |
| 4 |
* Redistribution and use in source and binary forms, with or without |
| 5 |
* modification, are permitted provided that the following conditions |
| 6 |
* are met: |
| 7 |
* |
| 8 |
* 1. Redistributions of source code must retain the above copyright |
| 9 |
* notice, this list of conditions and the following disclaimer. |
| 10 |
* 2. Redistributions in binary form must reproduce the above copyright |
| 11 |
* notice, this list of conditions and the following disclaimer in the |
| 12 |
* documentation and/or other materials provided with the distribution. |
| 13 |
* |
| 14 |
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
| 15 |
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 16 |
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 17 |
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS |
| 18 |
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 19 |
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 20 |
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 21 |
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 22 |
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 23 |
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
| 24 |
* THE POSSIBILITY OF SUCH DAMAGE. |
| 25 |
*/ |
| 26 |
|
| 27 |
#include <sys/cdefs.h> |
| 28 |
__FBSDID("$FreeBSD$"); |
| 29 |
|
| 30 |
#define BXE_DRIVER_VERSION "1.78.78" |
| 31 |
|
| 32 |
#include "bxe.h" |
| 33 |
#include "ecore_sp.h" |
| 34 |
#include "ecore_init.h" |
| 35 |
#include "ecore_init_ops.h" |
| 36 |
|
| 37 |
#include "57710_int_offsets.h" |
| 38 |
#include "57711_int_offsets.h" |
| 39 |
#include "57712_int_offsets.h" |
| 40 |
|
| 41 |
/*
 * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these
 * explicitly here for older kernels that don't include this changeset.
 */
#ifndef CTLTYPE_U64
#define CTLTYPE_U64      CTLTYPE_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#endif

/*
 * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these
 * here as zero(0) for older kernels that don't include this changeset,
 * thereby masking the functionality.
 */
#ifndef CSUM_TCP_IPV6
#define CSUM_TCP_IPV6 0
#define CSUM_UDP_IPV6 0
#endif

/*
 * pci_find_cap was added in r219865. Re-define this as pci_find_extcap
 * for older kernels that don't include this changeset.
 */
#if __FreeBSD_version < 900035
#define pci_find_cap pci_find_extcap
#endif

/* Bits reported for the default status block (attention vs. regular index). */
#define BXE_DEF_SB_ATT_IDX 0x0001
#define BXE_DEF_SB_IDX     0x0002

/*
 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
 * function HW initialization.
 */
#define FLR_WAIT_USEC     10000                              /* 10 msecs */
#define FLR_WAIT_INTERVAL 50                                 /* usecs */
#define FLR_POLL_CNT      (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */
| 78 |
|
| 79 |
/*
 * Per-pN PBF buffer credit register set, used while polling for FLR
 * cleanup completion. NOTE(review): field semantics inferred from names —
 * presumably register addresses/offsets read during the FLR poll; confirm
 * against bxe_pf_flr_clnup().
 */
struct pbf_pN_buf_regs {
    int      pN;        /* which pN instance this set describes */
    uint32_t init_crd;  /* initial credit */
    uint32_t crd;       /* current credit */
    uint32_t crd_freed; /* credits-freed counter */
};
| 85 |
|
| 86 |
/*
 * Per-pN PBF command-queue occupancy register set, companion to
 * struct pbf_pN_buf_regs for FLR cleanup polling. NOTE(review): field
 * semantics inferred from names — confirm against bxe_pf_flr_clnup().
 */
struct pbf_pN_cmd_regs {
    int      pN;          /* which pN instance this set describes */
    uint32_t lines_occup; /* command lines currently occupied */
    uint32_t lines_freed; /* command lines freed counter */
};
| 91 |
|
| 92 |
/*
 * PCI Device ID Table used by bxe_probe(). Each entry carries the vendor
 * id, chip id, subvendor/subdevice wildcards, and a human-readable device
 * description (at most BXE_DEVDESC_MAX chars). The all-zero/NULL entry
 * terminates the table. VF variants and the 2x20GbE part are compiled out.
 */
#define BXE_DEVDESC_MAX 64
static struct bxe_device_type bxe_devs[] = {
    { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57710 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57711 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57711E 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57712_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57712 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57800_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57800 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57810_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57810 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 10GbE" },
    { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57811_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57811 VF 10GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 4x10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57840_2_20, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 2x20GbE" },
#endif
    { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 MF 10GbE" },
#if 0
    { BRCM_VENDORID, CHIP_NUM_57840_VF, PCI_ANY_ID, PCI_ANY_ID,
      "QLogic NetXtreme II BCM57840 VF 10GbE" },
#endif
    { 0, 0, 0, 0, NULL } /* terminator */
};
| 227 |
|
| 228 |
/* Malloc type used for the chip ILT (internal lookup table) allocations. */
MALLOC_DECLARE(M_BXE_ILT);
MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer");

/*
 * FreeBSD device entry points.
 */
static int bxe_probe(device_t);
static int bxe_attach(device_t);
static int bxe_detach(device_t);
static int bxe_shutdown(device_t);
| 238 |
|
| 239 |
/*
 * FreeBSD KLD module/device interface event handler method table,
 * mapping newbus device_if/bus_if methods to this driver's handlers.
 */
static device_method_t bxe_methods[] = {
    /* Device interface (device_if.h) */
    DEVMETHOD(device_probe,     bxe_probe),
    DEVMETHOD(device_attach,    bxe_attach),
    DEVMETHOD(device_detach,    bxe_detach),
    DEVMETHOD(device_shutdown,  bxe_shutdown),
#if 0
    DEVMETHOD(device_suspend,   bxe_suspend),
    DEVMETHOD(device_resume,    bxe_resume),
#endif
    /* Bus interface (bus_if.h) */
    DEVMETHOD(bus_print_child,  bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    KOBJMETHOD_END
};
| 257 |
|
| 258 |
/*
 * FreeBSD KLD Module data declaration: ties the method table to the
 * "bxe" driver name and sizes the per-instance softc.
 */
static driver_t bxe_driver = {
    "bxe",                   /* module name */
    bxe_methods,             /* event handler */
    sizeof(struct bxe_softc) /* extra data */
};
| 266 |
|
| 267 |
/*
 * FreeBSD dev class is needed to manage dev instances and
 * to associate with a bus type.
 */
static devclass_t bxe_devclass;

/* Module dependencies and registration on the PCI bus. */
MODULE_DEPEND(bxe, pci, 1, 1, 1);
MODULE_DEPEND(bxe, ether, 1, 1, 1);
DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0);
| 276 |
|
| 277 |
/* resources needed for unloading a previously loaded device */

#define BXE_PREV_WAIT_NEEDED 1
/* Protects bxe_prev_list below; initialized at SYSINIT time. */
struct mtx bxe_prev_mtx;
MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF);
/* One node per previously-loaded device, keyed by PCI bus/slot/path. */
struct bxe_prev_list_node {
    LIST_ENTRY(bxe_prev_list_node) node;
    uint8_t bus;
    uint8_t slot;
    uint8_t path;
    uint8_t aer;  /* XXX automatic error recovery */
    uint8_t undi;
};
static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list =
    LIST_HEAD_INITIALIZER(bxe_prev_list);

static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
| 293 |
|
| 294 |
/* Tunable device values... */

SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters");

/* Debug */
unsigned long bxe_debug = 0;
TUNABLE_ULONG("hw.bxe.debug", &bxe_debug);
SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, (CTLFLAG_RDTUN),
             &bxe_debug, 0, "Debug logging mode");

/* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
static int bxe_interrupt_mode = INTR_MODE_MSIX;
TUNABLE_INT("hw.bxe.interrupt_mode", &bxe_interrupt_mode);
SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN,
           &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");

/* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */
static int bxe_queue_count = 4;
TUNABLE_INT("hw.bxe.queue_count", &bxe_queue_count);
SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
           &bxe_queue_count, 0, "Multi-Queue queue count");

/* max number of buffers per queue (default RX_BD_USABLE) */
static int bxe_max_rx_bufs = 0;
TUNABLE_INT("hw.bxe.max_rx_bufs", &bxe_max_rx_bufs);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN,
           &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue");

/* Host interrupt coalescing RX tick timer (usecs) */
static int bxe_hc_rx_ticks = 25;
TUNABLE_INT("hw.bxe.hc_rx_ticks", &bxe_hc_rx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks");

/* Host interrupt coalescing TX tick timer (usecs) */
static int bxe_hc_tx_ticks = 50;
TUNABLE_INT("hw.bxe.hc_tx_ticks", &bxe_hc_tx_ticks);
SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN,
           &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks");

/* Maximum number of Rx packets to process at a time */
/*
 * NOTE(review): 0xffffffff does not fit in a signed int and stores -1;
 * presumably intended as "unlimited" — confirm how the budget is consumed.
 */
static int bxe_rx_budget = 0xffffffff;
TUNABLE_INT("hw.bxe.rx_budget", &bxe_rx_budget);
/*
 * NOTE(review): CTLFLAG_TUN here and on max_aggregation_size (vs RDTUN on
 * the other knobs) means the value is not exposed read-only via sysctl —
 * confirm the inconsistency is intentional.
 */
SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN,
           &bxe_rx_budget, 0, "Rx processing budget");

/* Maximum LRO aggregation size */
static int bxe_max_aggregation_size = 0;
TUNABLE_INT("hw.bxe.max_aggregation_size", &bxe_max_aggregation_size);
SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN,
           &bxe_max_aggregation_size, 0, "max aggregation size");

/* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
static int bxe_mrrs = -1;
TUNABLE_INT("hw.bxe.mrrs", &bxe_mrrs);
SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN,
           &bxe_mrrs, 0, "PCIe maximum read request size");

/* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */
static int bxe_autogreeen = 0;
TUNABLE_INT("hw.bxe.autogreeen", &bxe_autogreeen);
SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN,
           &bxe_autogreeen, 0, "AutoGrEEEn support");

/* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
static int bxe_udp_rss = 0;
TUNABLE_INT("hw.bxe.udp_rss", &bxe_udp_rss);
SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN,
           &bxe_udp_rss, 0, "UDP RSS support");
| 363 |
|
| 364 |
|
| 365 |
#define STAT_NAME_LEN 32 /* no stat names below can be longer than this */

/* Offset of a field within struct bxe_eth_stats, in 32-bit words. */
#define STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_stats, stat_name) / 4)

/* Offset of a field within struct bxe_eth_q_stats, in 32-bit words. */
#define Q_STATS_OFFSET32(stat_name) \
    (offsetof(struct bxe_eth_q_stats, stat_name) / 4)
| 372 |
|
| 373 |
/*
 * Driver-wide ethernet statistics description table. 'offset' is the
 * 32-bit-word offset into struct bxe_eth_stats (see STATS_OFFSET32),
 * 'size' is the field width in bytes (4 or 8 — the *_hi fields are the
 * high half of a 64-bit pair), and 'flags' marks whether the stat is
 * per-port, per-function, or both. 'string' is the exported stat name.
 */
static const struct {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    char string[STAT_NAME_LEN];
} bxe_eth_stats_arr[] = {
    { STATS_OFFSET32(total_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
      8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
      8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
      8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
      8, STATS_FLAGS_PORT, "rx_oversize_packets" },
    { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
      8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
      8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
      8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
      4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(mf_tag_discard),
      4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
    { STATS_OFFSET32(pfc_frames_received_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_received" },
    { STATS_OFFSET32(pfc_frames_sent_hi),
      8, STATS_FLAGS_PORT, "pfc_frames_sent" },
    { STATS_OFFSET32(brb_drop_hi),
      8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
      8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
      8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
      8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
      4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
    { STATS_OFFSET32(total_bytes_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
      8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
      8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
      8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
      8, STATS_FLAGS_PORT, "tx_multi_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
      8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
      8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
      8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
      8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
      8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
      8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
      8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
      8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
    { STATS_OFFSET32(etherstatspktsover1522octets_hi),
      8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
      8, STATS_FLAGS_PORT, "tx_pause_frames" },
    { STATS_OFFSET32(total_tpa_aggregations_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregations" },
    { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
    { STATS_OFFSET32(total_tpa_bytes_hi),
      8, STATS_FLAGS_FUNC, "tpa_bytes"},
#if 0
    { STATS_OFFSET32(recoverable_error),
      4, STATS_FLAGS_FUNC, "recoverable_errors" },
    { STATS_OFFSET32(unrecoverable_error),
      4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
#endif
    { STATS_OFFSET32(eee_tx_lpi),
      4, STATS_FLAGS_PORT, "eee_tx_lpi"},
    { STATS_OFFSET32(rx_calls),
      4, STATS_FLAGS_FUNC, "rx_calls"},
    { STATS_OFFSET32(rx_pkts),
      4, STATS_FLAGS_FUNC, "rx_pkts"},
    { STATS_OFFSET32(rx_tpa_pkts),
      4, STATS_FLAGS_FUNC, "rx_tpa_pkts"},
    { STATS_OFFSET32(rx_soft_errors),
      4, STATS_FLAGS_FUNC, "rx_soft_errors"},
    { STATS_OFFSET32(rx_hw_csum_errors),
      4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"},
    { STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"},
    { STATS_OFFSET32(rx_budget_reached),
      4, STATS_FLAGS_FUNC, "rx_budget_reached"},
    { STATS_OFFSET32(tx_pkts),
      4, STATS_FLAGS_FUNC, "tx_pkts"},
    { STATS_OFFSET32(tx_soft_errors),
      4, STATS_FLAGS_FUNC, "tx_soft_errors"},
    { STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"},
    { STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"},
    { STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"},
    { STATS_OFFSET32(tx_ofld_frames_lso),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"},
    { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"},
    { STATS_OFFSET32(tx_encap_failures),
      4, STATS_FLAGS_FUNC, "tx_encap_failures"},
    { STATS_OFFSET32(tx_hw_queue_full),
      4, STATS_FLAGS_FUNC, "tx_hw_queue_full"},
    { STATS_OFFSET32(tx_hw_max_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"},
    { STATS_OFFSET32(tx_dma_mapping_failure),
      4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"},
    { STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"},
    { STATS_OFFSET32(tx_window_violation_std),
      4, STATS_FLAGS_FUNC, "tx_window_violation_std"},
    { STATS_OFFSET32(tx_window_violation_tso),
      4, STATS_FLAGS_FUNC, "tx_window_violation_tso"},
#if 0
    { STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"},
    { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"},
#endif
    { STATS_OFFSET32(tx_chain_lost_mbuf),
      4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"},
    { STATS_OFFSET32(tx_frames_deferred),
      4, STATS_FLAGS_FUNC, "tx_frames_deferred"},
    { STATS_OFFSET32(tx_queue_xoff),
      4, STATS_FLAGS_FUNC, "tx_queue_xoff"},
    { STATS_OFFSET32(mbuf_defrag_attempts),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"},
    { STATS_OFFSET32(mbuf_defrag_failures),
      4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"},
    { STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"},
    { STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"},
    { STATS_OFFSET32(mbuf_alloc_tx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"},
    { STATS_OFFSET32(mbuf_alloc_rx),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"},
    { STATS_OFFSET32(mbuf_alloc_sge),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"},
    { STATS_OFFSET32(mbuf_alloc_tpa),
      4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}
};
| 561 |
|
| 562 |
/*
 * Per-queue ethernet statistics description table. Same layout rules as
 * bxe_eth_stats_arr but offsets index into struct bxe_eth_q_stats and
 * there is no port/function flag (queue stats are inherently per-function).
 */
static const struct {
    uint32_t offset;
    uint32_t size;
    char string[STAT_NAME_LEN];
} bxe_eth_q_stats_arr[] = {
    { Q_STATS_OFFSET32(total_bytes_received_hi),
      8, "rx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
      8, "rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
      8, "rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
      8, "rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi),
      8, "rx_discards" },
    { Q_STATS_OFFSET32(total_bytes_transmitted_hi),
      8, "tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
      8, "tx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
      8, "tx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
      8, "tx_bcast_packets" },
    { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
      8, "tpa_aggregations" },
    { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
      8, "tpa_aggregated_frames"},
    { Q_STATS_OFFSET32(total_tpa_bytes_hi),
      8, "tpa_bytes"},
    { Q_STATS_OFFSET32(rx_calls),
      4, "rx_calls"},
    { Q_STATS_OFFSET32(rx_pkts),
      4, "rx_pkts"},
    { Q_STATS_OFFSET32(rx_tpa_pkts),
      4, "rx_tpa_pkts"},
    { Q_STATS_OFFSET32(rx_soft_errors),
      4, "rx_soft_errors"},
    { Q_STATS_OFFSET32(rx_hw_csum_errors),
      4, "rx_hw_csum_errors"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip),
      4, "rx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp),
      4, "rx_ofld_frames_csum_tcp_udp"},
    { Q_STATS_OFFSET32(rx_budget_reached),
      4, "rx_budget_reached"},
    { Q_STATS_OFFSET32(tx_pkts),
      4, "tx_pkts"},
    { Q_STATS_OFFSET32(tx_soft_errors),
      4, "tx_soft_errors"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip),
      4, "tx_ofld_frames_csum_ip"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp),
      4, "tx_ofld_frames_csum_tcp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp),
      4, "tx_ofld_frames_csum_udp"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso),
      4, "tx_ofld_frames_lso"},
    { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits),
      4, "tx_ofld_frames_lso_hdr_splits"},
    { Q_STATS_OFFSET32(tx_encap_failures),
      4, "tx_encap_failures"},
    { Q_STATS_OFFSET32(tx_hw_queue_full),
      4, "tx_hw_queue_full"},
    { Q_STATS_OFFSET32(tx_hw_max_queue_depth),
      4, "tx_hw_max_queue_depth"},
    { Q_STATS_OFFSET32(tx_dma_mapping_failure),
      4, "tx_dma_mapping_failure"},
    { Q_STATS_OFFSET32(tx_max_drbr_queue_depth),
      4, "tx_max_drbr_queue_depth"},
    { Q_STATS_OFFSET32(tx_window_violation_std),
      4, "tx_window_violation_std"},
    { Q_STATS_OFFSET32(tx_window_violation_tso),
      4, "tx_window_violation_tso"},
#if 0
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6),
      4, "tx_unsupported_tso_request_ipv6"},
    { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp),
      4, "tx_unsupported_tso_request_not_tcp"},
#endif
    { Q_STATS_OFFSET32(tx_chain_lost_mbuf),
      4, "tx_chain_lost_mbuf"},
    { Q_STATS_OFFSET32(tx_frames_deferred),
      4, "tx_frames_deferred"},
    { Q_STATS_OFFSET32(tx_queue_xoff),
      4, "tx_queue_xoff"},
    { Q_STATS_OFFSET32(mbuf_defrag_attempts),
      4, "mbuf_defrag_attempts"},
    { Q_STATS_OFFSET32(mbuf_defrag_failures),
      4, "mbuf_defrag_failures"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed),
      4, "mbuf_rx_bd_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed),
      4, "mbuf_rx_bd_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed),
      4, "mbuf_rx_tpa_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed),
      4, "mbuf_rx_tpa_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed),
      4, "mbuf_rx_sge_alloc_failed"},
    { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed),
      4, "mbuf_rx_sge_mapping_failed"},
    { Q_STATS_OFFSET32(mbuf_alloc_tx),
      4, "mbuf_alloc_tx"},
    { Q_STATS_OFFSET32(mbuf_alloc_rx),
      4, "mbuf_alloc_rx"},
    { Q_STATS_OFFSET32(mbuf_alloc_sge),
      4, "mbuf_alloc_sge"},
    { Q_STATS_OFFSET32(mbuf_alloc_tpa),
      4, "mbuf_alloc_tpa"}
};
| 672 |
|
| 673 |
#define BXE_NUM_ETH_STATS   ARRAY_SIZE(bxe_eth_stats_arr)
#define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr)

/* Forward declarations for functions defined later in this file. */
static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg,
                              uint8_t cmng_type);
static int  bxe_get_cmng_fns_mode(struct bxe_softc *sc);
static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng,
                              uint8_t port);
static void bxe_set_reset_global(struct bxe_softc *sc);
static void bxe_set_reset_in_progress(struct bxe_softc *sc);
static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine);
static uint8_t bxe_clear_pf_load(struct bxe_softc *sc);
static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global,
                                   uint8_t print);
static void bxe_int_disable(struct bxe_softc *sc);
static int  bxe_release_leader_lock(struct bxe_softc *sc);
static void bxe_pf_disable(struct bxe_softc *sc);
static void bxe_free_fp_buffers(struct bxe_softc *sc);
static inline void bxe_update_rx_prod(struct bxe_softc *sc,
                                      struct bxe_fastpath *fp,
                                      uint16_t rx_bd_prod,
                                      uint16_t rx_cq_prod,
                                      uint16_t rx_sge_prod);
static void bxe_link_report_locked(struct bxe_softc *sc);
static void bxe_link_report(struct bxe_softc *sc);
static void bxe_link_status_update(struct bxe_softc *sc);
static void bxe_periodic_callout_func(void *xsc);
static void bxe_periodic_start(struct bxe_softc *sc);
static void bxe_periodic_stop(struct bxe_softc *sc);
static int  bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                                 uint16_t prev_index, uint16_t index);
static int  bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue);
static int  bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index);
static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp);
static void bxe_task_fp(struct bxe_fastpath *fp);
static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m,
                                     uint8_t contents);
static int  bxe_alloc_mem(struct bxe_softc *sc);
static void bxe_free_mem(struct bxe_softc *sc);
static int  bxe_alloc_fw_stats_mem(struct bxe_softc *sc);
static void bxe_free_fw_stats_mem(struct bxe_softc *sc);
static int  bxe_interrupt_attach(struct bxe_softc *sc);
static void bxe_interrupt_detach(struct bxe_softc *sc);
static void bxe_set_rx_mode(struct bxe_softc *sc);
static int  bxe_init_locked(struct bxe_softc *sc);
static int  bxe_stop_locked(struct bxe_softc *sc);
static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode);
static __noinline int bxe_nic_unload(struct bxe_softc *sc,
                                     uint32_t unload_mode, uint8_t keep_link);

/* Taskqueue handlers (slowpath, rx-mode, and per-fastpath). */
static void bxe_handle_sp_tq(void *context, int pending);
static void bxe_handle_rx_mode_tq(void *context, int pending);
static void bxe_handle_fp_tq(void *context, int pending);
| 738 |
|
| 739 |
|
| 740 |
/* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ |
| 741 |
/*
 * Compute a CRC-32 over 'crc32_packet' using polynomial 0x1edc6f41,
 * seeded with 'crc32_seed'. Bits are consumed LSB-first within each byte;
 * the accumulator is then bit-mirrored, byte-swapped, and (optionally)
 * complemented before being returned.
 * (NOTE: crc32_length MUST be aligned to 8)
 *
 * Returns 'crc32_seed' unchanged when the buffer is NULL, the length is
 * zero, or the length is not a multiple of 8.
 */
uint32_t
calc_crc32(uint8_t *crc32_packet,
           uint32_t crc32_length,
           uint32_t crc32_seed,
           uint8_t  complement)
{
    const uint32_t CRC32_POLY = 0x1edc6f41;
    uint32_t crc = crc32_seed;
    uint32_t i;
    uint32_t bit;

    /* Invalid input: hand the seed straight back. */
    if ((crc32_packet == NULL) ||
        (crc32_length == 0) ||
        ((crc32_length % 8) != 0)) {
        return (crc);
    }

    /* Fold each message bit (LSB-first per byte) into the accumulator. */
    for (i = 0; i < crc32_length; i++) {
        uint8_t cur = crc32_packet[i];

        for (bit = 0; bit < 8; bit++) {
            uint8_t msb = (uint8_t)(crc >> 31);

            crc <<= 1;
            /* On mismatch: divide by the polynomial and force bit 0 on. */
            if (msb != ((cur >> bit) & 0x1)) {
                crc = (crc ^ CRC32_POLY) | 0x1;
            }
        }
    }

    /*
     * Last step is to:
     * 1. "mirror" every bit (bit i moves to bit 31-i)
     * 2. swap the 4 bytes
     * 3. complement each bit (optional)
     */
    {
        uint32_t mirrored = 0;

        for (bit = 0; bit < 32; bit++) {
            mirrored = (mirrored << 1) | ((crc >> bit) & 0x1);
        }
        crc = mirrored;
    }

    /* Swap: {b3,b2,b1,b0} -> {b0,b1,b2,b3} */
    crc = ((crc >> 24) & 0x000000ff) |
          ((crc >>  8) & 0x0000ff00) |
          ((crc <<  8) & 0x00ff0000) |
          ((crc << 24) & 0xff000000);

    /* Complement */
    if (complement) {
        crc = ~crc;
    }

    return (crc);
}
| 822 |
|
| 823 |
/*
 * Test bit 'nr' in *addr with acquire semantics.
 * Returns non-zero if the bit is set, 0 otherwise.
 */
int
bxe_test_bit(int                    nr,
             volatile unsigned long *addr)
{
    /* 1UL, not 1: shifting a plain int by nr >= 31 is undefined behavior
     * and an int mask cannot reach the upper bits of a 64-bit long */
    return ((atomic_load_acq_long(addr) & (1UL << nr)) != 0);
}
| 829 |
|
| 830 |
/* Atomically set bit 'nr' in *addr (acquire semantics). */
void
bxe_set_bit(unsigned int           nr,
            volatile unsigned long *addr)
{
    /* 1UL, not 1: an int shift is UB for nr >= 31 and cannot set the
     * upper bits of a 64-bit long */
    atomic_set_acq_long(addr, (1UL << nr));
}
| 836 |
|
| 837 |
/* Atomically clear bit 'nr' in *addr (acquire semantics). */
void
bxe_clear_bit(int                    nr,
              volatile unsigned long *addr)
{
    /* 1UL, not 1: an int shift is UB for nr >= 31 and cannot clear the
     * upper bits of a 64-bit long */
    atomic_clear_acq_long(addr, (1UL << nr));
}
| 843 |
|
| 844 |
/*
 * Atomically set bit 'nr' in *addr and report its previous state.
 * Returns non-zero if the bit was already set, 0 otherwise.
 */
int
bxe_test_and_set_bit(int                    nr,
                     volatile unsigned long *addr)
{
    unsigned long x;
    /* keep the mask in an unsigned long: the original 'nr = (1 << nr)'
     * overflowed the int for nr >= 31 and lost bits 32..63 */
    unsigned long mask = (1UL << nr);

    /* CAS loop: retry until we install x | mask over an unchanged x */
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x | mask) == 0);

    /* normalized truth value so high-bit results don't truncate to 0 */
    return ((x & mask) != 0);
}
| 856 |
|
| 857 |
/*
 * Atomically clear bit 'nr' in *addr and report its previous state.
 * Returns non-zero if the bit was set, 0 otherwise.
 */
int
bxe_test_and_clear_bit(int                    nr,
                       volatile unsigned long *addr)
{
    unsigned long x;
    /* keep the mask in an unsigned long: the original 'nr = (1 << nr)'
     * overflowed the int for nr >= 31 and lost bits 32..63 */
    unsigned long mask = (1UL << nr);

    /* CAS loop: retry until we install x & ~mask over an unchanged x */
    do {
        x = *addr;
    } while (atomic_cmpset_acq_long(addr, x, x & ~mask) == 0);

    /* normalized truth value so high-bit results don't truncate to 0 */
    return ((x & mask) != 0);
}
| 869 |
|
| 870 |
/*
 * Compare-and-exchange on an int: attempts to replace *addr with 'new'
 * when it equals 'old', returning the value observed in *addr.
 *
 * NOTE(review): the loop retries until atomic_cmpset_acq_int succeeds,
 * which only happens when *addr == old; if *addr never equals 'old' this
 * spins indefinitely rather than returning the mismatching value like a
 * classic cmpxchg — confirm callers always pass the current value.
 */
int
bxe_cmpxchg(volatile int *addr,
            int          old,
            int          new)
{
    int x;
    do {
        /* snapshot the current value to return to the caller */
        x = *addr;
    } while (atomic_cmpset_acq_int(addr, old, new) == 0);
    return (x);
}
| 881 |
|
| 882 |
/*
 * Get DMA memory from the OS.
 *
 * Validates that the OS has provided DMA buffers in response to a
 * bus_dmamap_load call and saves the physical address of those buffers.
 * When the callback is used the OS will return 0 for the mapping function
 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
 * failures back to the caller.
 *
 * Returns:
 *   Nothing.
 */
static void
bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    /* 'arg' is the struct bxe_dma passed to bus_dmamap_load() */
    struct bxe_dma *dma = arg;

    if (error) {
        /* zero out the record so the caller can detect the failure */
        dma->paddr = 0;
        dma->nseg  = 0;
        BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
    } else {
        /* single segment expected (tag created with nsegments == 1) */
        dma->paddr = segs->ds_addr;
        dma->nseg  = nseg;
#if 0
        BLOGD(dma->sc, DBG_LOAD,
              "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif
    }
}
| 914 |
|
| 915 |
/*
 * Allocate a block of memory and map it for DMA. No partial completions
 * allowed and release any resources acquired if we can't acquire all
 * resources.
 *
 * Returns:
 *   0 = Success, !0 = Failure
 */
int
bxe_dma_alloc(struct bxe_softc *sc,
              bus_size_t       size,
              struct bxe_dma   *dma,
              const char       *msg)
{
    int rc;

    /* a non-zero size means this record is already in use */
    if (dma->size > 0) {
        BLOGE(sc, "dma block '%s' already has size %lu\n", msg,
              (unsigned long)dma->size);
        return (1);
    }

    memset(dma, 0, sizeof(*dma)); /* sanity */
    dma->sc   = sc;
    dma->size = size;
    snprintf(dma->msg, sizeof(dma->msg), "%s", msg);

    rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
                            BCM_PAGE_SIZE,      /* alignment */
                            0,                  /* boundary limit */
                            BUS_SPACE_MAXADDR,  /* restricted low */
                            BUS_SPACE_MAXADDR,  /* restricted hi */
                            NULL,               /* addr filter() */
                            NULL,               /* addr filter() arg */
                            size,               /* max map size */
                            1,                  /* num discontinuous */
                            size,               /* max seg size */
                            BUS_DMA_ALLOCNOW,   /* flags */
                            NULL,               /* lock() */
                            NULL,               /* lock() arg */
                            &dma->tag);         /* returned dma tag */
    if (rc != 0) {
        BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    /* allocate zeroed, DMA-able memory under the tag */
    rc = bus_dmamem_alloc(dma->tag,
                          (void **)&dma->vaddr,
                          (BUS_DMA_NOWAIT | BUS_DMA_ZERO),
                          &dma->map);
    if (rc != 0) {
        BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc);
        /* unwind: destroy the tag created above */
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    /* map it; bxe_dma_map_addr() records the physical address */
    rc = bus_dmamap_load(dma->tag,
                         dma->map,
                         dma->vaddr,
                         size,
                         bxe_dma_map_addr, /* BLOGD in here */
                         dma,
                         BUS_DMA_NOWAIT);
    if (rc != 0) {
        BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc);
        /* unwind: free the memory and destroy the tag */
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
        memset(dma, 0, sizeof(*dma));
        return (1);
    }

    return (0);
}
| 990 |
|
| 991 |
/*
 * Release a DMA block previously set up by bxe_dma_alloc().
 * Safe to call on a zeroed/never-allocated record (size == 0 is a no-op
 * except for the final memset). Always leaves *dma zeroed for reuse.
 */
void
bxe_dma_free(struct bxe_softc *sc,
             struct bxe_dma   *dma)
{
    if (dma->size > 0) {
#if 0
        BLOGD(sc, DBG_LOAD,
              "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n",
              dma->msg, dma->vaddr, (void *)dma->paddr,
              dma->nseg, dma->size);
#endif

        DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));

        /* teardown in reverse order of bxe_dma_alloc(): sync, unload,
         * free memory, destroy tag */
        bus_dmamap_sync(dma->tag, dma->map,
                        (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE));
        bus_dmamap_unload(dma->tag, dma->map);
        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
        bus_dma_tag_destroy(dma->tag);
    }

    memset(dma, 0, sizeof(*dma));
}
| 1014 |
|
| 1015 |
/* |
| 1016 |
* These indirect read and write routines are only during init. |
| 1017 |
* The locking is handled by the MCP. |
| 1018 |
*/ |
| 1019 |
|
| 1020 |
/*
 * Indirect register write via PCI config space (GRC window).
 * Only used during init; locking is handled by the MCP.
 * The write order (address, data, then clearing the address) is
 * required by the GRC indirection mechanism.
 */
void
bxe_reg_wr_ind(struct bxe_softc *sc,
               uint32_t         addr,
               uint32_t         val)
{
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
    /* clear the window address afterwards */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
}
| 1029 |
|
| 1030 |
/*
 * Indirect register read via PCI config space (GRC window).
 * Only used during init; locking is handled by the MCP.
 * Returns the 32-bit register value at 'addr'.
 */
uint32_t
bxe_reg_rd_ind(struct bxe_softc *sc,
               uint32_t         addr)
{
    uint32_t val;

    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
    val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
    /* clear the window address afterwards */
    pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);

    return (val);
}
| 1042 |
|
| 1043 |
#if 0
/* Debug aid (compiled out): pretty-print a DMAE command's source,
 * destination and completion parameters based on its opcode. */
void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl)
{
    uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC;

    switch (dmae->opcode & DMAE_COMMAND_DST) {
    case DMAE_CMD_DST_PCI:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%x:%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    case DMAE_CMD_DST_GRC:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src [%08x], len [%d*4], dst [%08x]\n"
               "comp_addr [%x:%08x], comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->dst_addr_lo >> 2,
               dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    default:
        if (src_type == DMAE_CMD_SRC_PCI)
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        else
            DP(msglvl, "DMAE: opcode 0x%08x\n"
               "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
               "comp_addr [%x:%08x] comp_val 0x%08x\n",
               dmae->opcode, dmae->src_addr_lo >> 2,
               dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
               dmae->comp_val);
        break;
    }

}
#endif
| 1105 |
|
| 1106 |
/*
 * Acquire the hardware resource lock 'resource' for this PF.
 * Polls the per-function MISC driver-control register every 5ms for up
 * to 5 seconds. Returns 0 on success, -1 on bad resource, lock already
 * held, or timeout.
 */
static int
bxe_acquire_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;
    int cnt;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    /* functions 0-5 use DRIVER_CONTROL_1..6, 6-7 use DRIVER_CONTROL_7..8 */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is not already taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* try every 5ms for 5 seconds */
    for (cnt = 0; cnt < 1000; cnt++) {
        /* writing to reg+4 requests the lock; readback confirms grant */
        REG_WR(sc, (hw_lock_control_reg + 4), resource_bit);
        lock_status = REG_RD(sc, hw_lock_control_reg);
        if (lock_status & resource_bit) {
            return (0);
        }
        DELAY(5000);
    }

    BLOGE(sc, "Resource lock timeout!\n");
    return (-1);
}
| 1150 |
|
| 1151 |
/*
 * Release the hardware resource lock 'resource' for this PF.
 * Returns 0 on success, -1 if the resource is out of range or the lock
 * was not actually held.
 */
static int
bxe_release_hw_lock(struct bxe_softc *sc,
                    uint32_t         resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    /* validate the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource);
        return (-1);
    }

    /* same register selection as bxe_acquire_hw_lock() */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8));
    } else {
        hw_lock_control_reg =
            (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
    }

    /* validate the resource is currently taken */
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (!(lock_status & resource_bit)) {
        BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n",
              lock_status, resource_bit);
        return (-1);
    }

    /* writing the bit to the base register releases the lock */
    REG_WR(sc, hw_lock_control_reg, resource_bit);
    return (0);
}
| 1184 |
|
| 1185 |
/* |
| 1186 |
* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, |
| 1187 |
* had we done things the other way around, if two pfs from the same port |
| 1188 |
* would attempt to access nvram at the same time, we could run into a |
| 1189 |
* scenario such as: |
| 1190 |
* pf A takes the port lock. |
| 1191 |
* pf B succeeds in taking the same lock since they are from the same port. |
| 1192 |
* pf A takes the per pf misc lock. Performs eeprom access. |
| 1193 |
* pf A finishes. Unlocks the per pf misc lock. |
| 1194 |
* Pf B takes the lock and proceeds to perform it's own access. |
| 1195 |
* pf A unlocks the per port lock, while pf B is still working (!). |
| 1196 |
* mcp takes the per port lock and corrupts pf B's access (and/or has it's own |
| 1197 |
* access corrupted by pf B).* |
| 1198 |
*/ |
| 1199 |
static int |
| 1200 |
bxe_acquire_nvram_lock(struct bxe_softc *sc) |
| 1201 |
{ |
| 1202 |
int port = SC_PORT(sc); |
| 1203 |
int count, i; |
| 1204 |
uint32_t val = 0; |
| 1205 |
|
| 1206 |
/* acquire HW lock: protect against other PFs in PF Direct Assignment */ |
| 1207 |
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); |
| 1208 |
|
| 1209 |
/* adjust timeout for emulation/FPGA */ |
| 1210 |
count = NVRAM_TIMEOUT_COUNT; |
| 1211 |
if (CHIP_REV_IS_SLOW(sc)) { |
| 1212 |
count *= 100; |
| 1213 |
} |
| 1214 |
|
| 1215 |
/* request access to nvram interface */ |
| 1216 |
REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, |
| 1217 |
(MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); |
| 1218 |
|
| 1219 |
for (i = 0; i < count*10; i++) { |
| 1220 |
val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); |
| 1221 |
if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { |
| 1222 |
break; |
| 1223 |
} |
| 1224 |
|
| 1225 |
DELAY(5); |
| 1226 |
} |
| 1227 |
|
| 1228 |
if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { |
| 1229 |
BLOGE(sc, "Cannot get access to nvram interface\n"); |
| 1230 |
return (-1); |
| 1231 |
} |
| 1232 |
|
| 1233 |
return (0); |
| 1234 |
} |
| 1235 |
|
| 1236 |
/*
 * Relinquish NVRAM arbitration for this port, then drop the HW lock.
 * Returns 0 on success, -1 if the arbitration bit failed to clear.
 *
 * NOTE(review): on timeout this returns -1 without releasing the HW
 * lock; presumably intentional since the NVRAM interface is still held
 * by us — confirm against the acquire path.
 */
static int
bxe_release_nvram_lock(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int count, i;
    uint32_t val = 0;

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* relinquish nvram interface */
    REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
           (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

    /* poll (5us per iteration) until the arbitration bit clears */
    for (i = 0; i < count*10; i++) {
        val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB);
        if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
            break;
        }

        DELAY(5);
    }

    if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
        BLOGE(sc, "Cannot free access to nvram interface\n");
        return (-1);
    }

    /* release HW lock: protect against other PFs in PF Direct Assignment */
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM);

    return (0);
}
| 1272 |
|
| 1273 |
/*
 * Enable NVRAM access through the MCP register block.
 * Read-modify-write that sets both the access-enable and write-enable
 * bits; caller must already hold the NVRAM lock.
 */
static void
bxe_enable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* enable both bits, even on read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN));
}
| 1284 |
|
| 1285 |
/*
 * Disable NVRAM access through the MCP register block.
 * Read-modify-write that clears both the access-enable and write-enable
 * bits; pairs with bxe_enable_nvram_access().
 */
static void
bxe_disable_nvram_access(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

    /* disable both bits, even after read */
    REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
           (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
                    MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
| 1297 |
|
| 1298 |
/*
 * Read one dword from NVRAM at byte offset 'offset'.
 * 'cmd_flags' carries FIRST/LAST sequencing flags; the value is stored
 * in *ret_val in big-endian order (see comment below). Caller must hold
 * the NVRAM lock and have access enabled.
 * Returns 0 on success, -1 on timeout.
 */
static int
bxe_nvram_read_dword(struct bxe_softc *sc,
                     uint32_t         offset,
                     uint32_t         *ret_val,
                     uint32_t         cmd_flags)
{
    int count, i, rc;
    uint32_t val;

    /* build the command word */
    cmd_flags |= MCPR_NVM_COMMAND_DOIT;

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* address of the NVRAM to read from */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue a read command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion */
    *ret_val = 0;
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);

        if (val & MCPR_NVM_COMMAND_DONE) {
            val = REG_RD(sc, MCP_REG_MCPR_NVM_READ);
            /* we read nvram data in cpu order
             * but ethtool sees it as an array of bytes
             * converting to big-endian will do the work
             */
            *ret_val = htobe32(val);
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram read timeout expired\n");
    }

    return (rc);
}
| 1351 |
|
| 1352 |
/*
 * Read 'buf_size' bytes from NVRAM starting at 'offset' into ret_buf.
 * Both offset and buf_size must be dword-aligned and the range must lie
 * within the flash. Handles NVRAM lock acquire/release and access
 * enable/disable internally.
 * Returns 0 on success, non-zero on parameter/lock/timeout failure.
 */
static int
bxe_nvram_read(struct bxe_softc *sc,
               uint32_t         offset,
               uint8_t          *ret_buf,
               int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    int rc;

    /* both offset and size must be dword aligned and non-zero */
    if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the first word(s); FIRST flag only on the initial dword */
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((buf_size > sizeof(uint32_t)) && (rc == 0)) {
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        ret_buf += sizeof(uint32_t);
        buf_size -= sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* final dword carries the LAST flag */
    if (rc == 0) {
        cmd_flags |= MCPR_NVM_COMMAND_LAST;
        rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags);
        memcpy(ret_buf, &val, 4);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
| 1409 |
|
| 1410 |
/*
 * Write one dword 'val' to NVRAM at byte offset 'offset'.
 * 'cmd_flags' carries FIRST/LAST sequencing flags. Caller must hold the
 * NVRAM lock and have access (and write) enabled.
 * Returns 0 on success, -1 on timeout.
 */
static int
bxe_nvram_write_dword(struct bxe_softc *sc,
                      uint32_t         offset,
                      uint32_t         val,
                      uint32_t         cmd_flags)
{
    int count, i, rc;

    /* build the command word */
    cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR);

    /* need to clear DONE bit separately */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

    /* write the data */
    REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val);

    /* address of the NVRAM to write to */
    REG_WR(sc, MCP_REG_MCPR_NVM_ADDR,
           (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

    /* issue the write command */
    REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

    /* adjust timeout for emulation/FPGA */
    count = NVRAM_TIMEOUT_COUNT;
    if (CHIP_REV_IS_SLOW(sc)) {
        count *= 100;
    }

    /* wait for completion; 'val' is reused as the status readback */
    rc = -1;
    for (i = 0; i < count; i++) {
        DELAY(5);
        val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND);
        if (val & MCPR_NVM_COMMAND_DONE) {
            rc = 0;
            break;
        }
    }

    if (rc == -1) {
        BLOGE(sc, "nvram write timeout expired\n");
    }

    return (rc);
}
| 1457 |
|
| 1458 |
/* bit position of byte 'offset' within its containing dword */
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

/*
 * Write a single byte (*data_buf) to NVRAM at byte offset 'offset'.
 * Implemented as read-modify-write of the containing dword. Handles
 * NVRAM lock acquire/release and access enable/disable internally.
 * Returns 0 on success, non-zero on failure.
 */
static int
bxe_nvram_write1(struct bxe_softc *sc,
                 uint32_t         offset,
                 uint8_t          *data_buf,
                 int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t align_offset;
    uint32_t val;
    int rc;

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    /* read the dword containing the target byte */
    cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
    align_offset = (offset & ~0x03);
    rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags);

    if (rc == 0) {
        /* splice the new byte into its slot (val is big-endian here) */
        val &= ~(0xff << BYTE_OFFSET(offset));
        val |= (*data_buf << BYTE_OFFSET(offset));

        /* nvram data is returned as an array of bytes
         * convert it back to cpu order
         */
        val = be32toh(val);

        rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags);
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
| 1509 |
|
| 1510 |
/*
 * Write 'buf_size' bytes from data_buf to NVRAM starting at 'offset'.
 * A single-byte write is delegated to bxe_nvram_write1(); otherwise
 * offset and buf_size must be dword-aligned and within the flash.
 * FIRST/LAST flags are applied at NVRAM page boundaries. Handles
 * NVRAM lock acquire/release and access enable/disable internally.
 * Returns 0 on success, non-zero on failure.
 */
static int
bxe_nvram_write(struct bxe_softc *sc,
                uint32_t         offset,
                uint8_t          *data_buf,
                int              buf_size)
{
    uint32_t cmd_flags;
    uint32_t val;
    uint32_t written_so_far;
    int rc;

    /* single-byte case uses the read-modify-write helper */
    if (buf_size == 1) {
        return (bxe_nvram_write1(sc, offset, data_buf, buf_size));
    }

    if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) {
        BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n",
              offset, buf_size);
        return (-1);
    }

    if (buf_size == 0) {
        return (0); /* nothing to do */
    }

    if ((offset + buf_size) > sc->devinfo.flash_size) {
        BLOGE(sc, "Invalid parameter, "
                  "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n",
              offset, buf_size, sc->devinfo.flash_size);
        return (-1);
    }

    /* request access to nvram interface */
    rc = bxe_acquire_nvram_lock(sc);
    if (rc) {
        return (rc);
    }

    /* enable access to nvram interface */
    bxe_enable_nvram_access(sc);

    written_so_far = 0;
    cmd_flags = MCPR_NVM_COMMAND_FIRST;
    while ((written_so_far < buf_size) && (rc == 0)) {
        /* flag the final dword of the buffer or of an NVRAM page as
         * LAST, and the first dword of a new page as FIRST */
        if (written_so_far == (buf_size - sizeof(uint32_t))) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_LAST;
        } else if ((offset % NVRAM_PAGE_SIZE) == 0) {
            cmd_flags |= MCPR_NVM_COMMAND_FIRST;
        }

        memcpy(&val, data_buf, 4);

        rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags);

        /* advance to the next dword */
        offset += sizeof(uint32_t);
        data_buf += sizeof(uint32_t);
        written_so_far += sizeof(uint32_t);
        cmd_flags = 0;
    }

    /* disable access to nvram interface */
    bxe_disable_nvram_access(sc);
    bxe_release_nvram_lock(sc);

    return (rc);
}
| 1579 |
|
| 1580 |
/* copy command into DMAE command memory and set DMAE command Go */
void
bxe_post_dmae(struct bxe_softc    *sc,
              struct dmae_command *dmae,
              int                 idx)
{
    uint32_t cmd_offset;
    int i;

    /* copy the command, dword by dword, into channel idx's slot.
     * NOTE(review): the (uint32_t *) cast over struct dmae_command
     * assumes the struct is a packed array of dwords — confirm against
     * its declaration */
    cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_command) * idx));
    for (i = 0; i < ((sizeof(struct dmae_command) / 4)); i++) {
        REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i));
    }

    /* kick off the command on channel idx */
    REG_WR(sc, dmae_reg_go_c[idx], 1);
}
| 1596 |
|
| 1597 |
/*
 * Add completion parameters to a DMAE opcode: destination for the
 * completion word ('comp_type') plus the completion-enable bit.
 */
uint32_t
bxe_dmae_opcode_add_comp(uint32_t opcode,
                         uint8_t  comp_type)
{
    return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                      DMAE_COMMAND_C_TYPE_ENABLE));
}
| 1604 |
|
| 1605 |
/* Clear the source-reset flag from a DMAE opcode. */
uint32_t
bxe_dmae_opcode_clr_src_reset(uint32_t opcode)
{
    return (opcode & ~DMAE_COMMAND_SRC_RESET);
}
| 1610 |
|
| 1611 |
/*
 * Build a DMAE opcode for a src_type -> dst_type transfer, encoding
 * port, VN, error policy and host endianness. When 'with_comp' is set,
 * completion parameters of 'comp_type' are folded in as well.
 */
uint32_t
bxe_dmae_opcode(struct bxe_softc *sc,
                uint8_t          src_type,
                uint8_t          dst_type,
                uint8_t          with_comp,
                uint8_t          comp_type)
{
    uint32_t opcode = 0;

    opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
               (dst_type << DMAE_COMMAND_DST_SHIFT));

    /* reset source and destination state machines */
    opcode |= (DMAE_COMMAND_SRC_RESET | DMAE_COMMAND_DST_RESET);

    opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);

    opcode |= ((SC_VN(sc) << DMAE_COMMAND_E1HVN_SHIFT) |
               (SC_VN(sc) << DMAE_COMMAND_DST_VN_SHIFT));

    opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

    /* byte/dword swapping depends on host endianness */
#ifdef __BIG_ENDIAN
    opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
    opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif

    if (with_comp) {
        opcode = bxe_dmae_opcode_add_comp(opcode, comp_type);
    }

    return (opcode);
}
| 1644 |
|
| 1645 |
/*
 * Initialize a DMAE command for a src_type -> dst_type transfer with
 * PCI completion: zeroes the command, builds the opcode, and points the
 * completion at the shared wb_comp slowpath word. Caller fills in the
 * addresses and length afterwards.
 */
static void
bxe_prep_dmae_with_comp(struct bxe_softc    *sc,
                        struct dmae_command *dmae,
                        uint8_t             src_type,
                        uint8_t             dst_type)
{
    memset(dmae, 0, sizeof(struct dmae_command));

    /* set the opcode */
    dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
                                   TRUE, DMAE_COMP_PCI);

    /* fill in the completion parameters */
    dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
    dmae->comp_val     = DMAE_COMP_VAL;
}
| 1662 |
|
| 1663 |
/* issue a DMAE command over the init channel and wait for completion */
static int
bxe_issue_dmae_with_comp(struct bxe_softc    *sc,
                         struct dmae_command *dmae)
{
    /* completion word shared with the device (set up by
     * bxe_prep_dmae_with_comp) */
    uint32_t *wb_comp = BXE_SP(sc, wb_comp);
    int timeout = CHIP_REV_IS_SLOW(sc) ? 400000 : 4000;

    BXE_DMAE_LOCK(sc);

    /* reset completion */
    *wb_comp = 0;

    /* post the command on the channel used for initializations */
    bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc));

    /* wait for completion */
    DELAY(5);

    /* poll every 50us until the device writes DMAE_COMP_VAL, bailing
     * out on timeout or if a recovery sequence is in progress */
    while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
        if (!timeout ||
            (sc->recovery_state != BXE_RECOVERY_DONE &&
             sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
            BLOGE(sc, "DMAE timeout!\n");
            BXE_DMAE_UNLOCK(sc);
            return (DMAE_TIMEOUT);
        }

        timeout--;
        DELAY(50);
    }

    /* the device flags PCI errors in the completion word */
    if (*wb_comp & DMAE_PCI_ERR_FLAG) {
        BLOGE(sc, "DMAE PCI error!\n");
        BXE_DMAE_UNLOCK(sc);
        return (DMAE_PCI_ERROR);
    }

    BXE_DMAE_UNLOCK(sc);
    return (0);
}
| 1704 |
|
| 1705 |
void |
| 1706 |
bxe_read_dmae(struct bxe_softc *sc, |
| 1707 |
uint32_t src_addr, |
| 1708 |
uint32_t len32) |
| 1709 |
{ |
| 1710 |
struct dmae_command dmae; |
| 1711 |
uint32_t *data; |
| 1712 |
int i, rc; |
| 1713 |
|
| 1714 |
DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); |
| 1715 |
|
| 1716 |
if (!sc->dmae_ready) { |
| 1717 |
data = BXE_SP(sc, wb_data[0]); |
| 1718 |
|
| 1719 |
for (i = 0; i < len32; i++) { |
| 1720 |
data[i] = (CHIP_IS_E1(sc)) ? |
| 1721 |
bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : |
| 1722 |
REG_RD(sc, (src_addr + (i * 4))); |
| 1723 |
} |
| 1724 |
|
| 1725 |
return; |
| 1726 |
} |
| 1727 |
|
| 1728 |
/* set opcode and fixed command fields */ |
| 1729 |
bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); |
| 1730 |
|
| 1731 |
/* fill in addresses and len */ |
| 1732 |
dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ |
| 1733 |
dmae.src_addr_hi = 0; |
| 1734 |
dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); |
| 1735 |
dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); |
| 1736 |
dmae.len = len32; |
| 1737 |
|
| 1738 |
/* issue the command and wait for completion */ |
| 1739 |
if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { |
| 1740 |
bxe_panic(sc, ("DMAE failed (%d)\n", rc)); |
| 1741 |
}; |
| 1742 |
} |
| 1743 |
|
| 1744 |
void |
| 1745 |
bxe_write_dmae(struct bxe_softc *sc, |
| 1746 |
bus_addr_t dma_addr, |
| 1747 |
uint32_t dst_addr, |
| 1748 |
uint32_t len32) |
| 1749 |
{ |
| 1750 |
struct dmae_command dmae; |
| 1751 |
int rc; |
| 1752 |
|
| 1753 |
if (!sc->dmae_ready) { |
| 1754 |
DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); |
| 1755 |
|
| 1756 |
if (CHIP_IS_E1(sc)) { |
| 1757 |
ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); |
| 1758 |
} else { |
| 1759 |
ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); |
| 1760 |
} |
| 1761 |
|
| 1762 |
return; |
| 1763 |
} |
| 1764 |
|
| 1765 |
/* set opcode and fixed command fields */ |
| 1766 |
bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); |
| 1767 |
|
| 1768 |
/* fill in addresses and len */ |
| 1769 |
dmae.src_addr_lo = U64_LO(dma_addr); |
| 1770 |
dmae.src_addr_hi = U64_HI(dma_addr); |
| 1771 |
dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ |
| 1772 |
dmae.dst_addr_hi = 0; |
| 1773 |
dmae.len = len32; |
| 1774 |
|
| 1775 |
/* issue the command and wait for completion */ |
| 1776 |
if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { |
| 1777 |
bxe_panic(sc, ("DMAE failed (%d)\n", rc)); |
| 1778 |
} |
| 1779 |
} |
| 1780 |
|
| 1781 |
void |
| 1782 |
bxe_write_dmae_phys_len(struct bxe_softc *sc, |
| 1783 |
bus_addr_t phys_addr, |
| 1784 |
uint32_t addr, |
| 1785 |
uint32_t len) |
| 1786 |
{ |
| 1787 |
int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); |
| 1788 |
int offset = 0; |
| 1789 |
|
| 1790 |
while (len > dmae_wr_max) { |
| 1791 |
bxe_write_dmae(sc, |
| 1792 |
(phys_addr + offset), /* src DMA address */ |
| 1793 |
(addr + offset), /* dst GRC address */ |
| 1794 |
dmae_wr_max); |
| 1795 |
offset += (dmae_wr_max * 4); |
| 1796 |
len -= dmae_wr_max; |
| 1797 |
} |
| 1798 |
|
| 1799 |
bxe_write_dmae(sc, |
| 1800 |
(phys_addr + offset), /* src DMA address */ |
| 1801 |
(addr + offset), /* dst GRC address */ |
| 1802 |
len); |
| 1803 |
} |
| 1804 |
|
| 1805 |
void |
| 1806 |
bxe_set_ctx_validation(struct bxe_softc *sc, |
| 1807 |
struct eth_context *cxt, |
| 1808 |
uint32_t cid) |
| 1809 |
{ |
| 1810 |
/* ustorm cxt validation */ |
| 1811 |
cxt->ustorm_ag_context.cdu_usage = |
| 1812 |
CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), |
| 1813 |
CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); |
| 1814 |
/* xcontext validation */ |
| 1815 |
cxt->xstorm_ag_context.cdu_reserved = |
| 1816 |
CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), |
| 1817 |
CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); |
| 1818 |
} |
| 1819 |
|
| 1820 |
static void |
| 1821 |
bxe_storm_memset_hc_timeout(struct bxe_softc *sc, |
| 1822 |
uint8_t port, |
| 1823 |
uint8_t fw_sb_id, |
| 1824 |
uint8_t sb_index, |
| 1825 |
uint8_t ticks) |
| 1826 |
{ |
| 1827 |
uint32_t addr = |
| 1828 |
(BAR_CSTRORM_INTMEM + |
| 1829 |
CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); |
| 1830 |
|
| 1831 |
REG_WR8(sc, addr, ticks); |
| 1832 |
|
| 1833 |
BLOGD(sc, DBG_LOAD, |
| 1834 |
"port %d fw_sb_id %d sb_index %d ticks %d\n", |
| 1835 |
port, fw_sb_id, sb_index, ticks); |
| 1836 |
} |
| 1837 |
|
| 1838 |
static void |
| 1839 |
bxe_storm_memset_hc_disable(struct bxe_softc *sc, |
| 1840 |
uint8_t port, |
| 1841 |
uint16_t fw_sb_id, |
| 1842 |
uint8_t sb_index, |
| 1843 |
uint8_t disable) |
| 1844 |
{ |
| 1845 |
uint32_t enable_flag = |
| 1846 |
(disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); |
| 1847 |
uint32_t addr = |
| 1848 |
(BAR_CSTRORM_INTMEM + |
| 1849 |
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); |
| 1850 |
uint8_t flags; |
| 1851 |
|
| 1852 |
/* clear and set */ |
| 1853 |
flags = REG_RD8(sc, addr); |
| 1854 |
flags &= ~HC_INDEX_DATA_HC_ENABLED; |
| 1855 |
flags |= enable_flag; |
| 1856 |
REG_WR8(sc, addr, flags); |
| 1857 |
|
| 1858 |
BLOGD(sc, DBG_LOAD, |
| 1859 |
"port %d fw_sb_id %d sb_index %d disable %d\n", |
| 1860 |
port, fw_sb_id, sb_index, disable); |
| 1861 |
} |
| 1862 |
|
| 1863 |
void |
| 1864 |
bxe_update_coalesce_sb_index(struct bxe_softc *sc, |
| 1865 |
uint8_t fw_sb_id, |
| 1866 |
uint8_t sb_index, |
| 1867 |
uint8_t disable, |
| 1868 |
uint16_t usec) |
| 1869 |
{ |
| 1870 |
int port = SC_PORT(sc); |
| 1871 |
uint8_t ticks = (usec / 4); /* XXX ??? */ |
| 1872 |
|
| 1873 |
bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); |
| 1874 |
|
| 1875 |
disable = (disable) ? 1 : ((usec) ? 0 : 1); |
| 1876 |
bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); |
| 1877 |
} |
| 1878 |
|
| 1879 |
void |
| 1880 |
elink_cb_udelay(struct bxe_softc *sc, |
| 1881 |
uint32_t usecs) |
| 1882 |
{ |
| 1883 |
DELAY(usecs); |
| 1884 |
} |
| 1885 |
|
| 1886 |
uint32_t |
| 1887 |
elink_cb_reg_read(struct bxe_softc *sc, |
| 1888 |
uint32_t reg_addr) |
| 1889 |
{ |
| 1890 |
return (REG_RD(sc, reg_addr)); |
| 1891 |
} |
| 1892 |
|
| 1893 |
void |
| 1894 |
elink_cb_reg_write(struct bxe_softc *sc, |
| 1895 |
uint32_t reg_addr, |
| 1896 |
uint32_t val) |
| 1897 |
{ |
| 1898 |
REG_WR(sc, reg_addr, val); |
| 1899 |
} |
| 1900 |
|
| 1901 |
void |
| 1902 |
elink_cb_reg_wb_write(struct bxe_softc *sc, |
| 1903 |
uint32_t offset, |
| 1904 |
uint32_t *wb_write, |
| 1905 |
uint16_t len) |
| 1906 |
{ |
| 1907 |
REG_WR_DMAE(sc, offset, wb_write, len); |
| 1908 |
} |
| 1909 |
|
| 1910 |
void |
| 1911 |
elink_cb_reg_wb_read(struct bxe_softc *sc, |
| 1912 |
uint32_t offset, |
| 1913 |
uint32_t *wb_write, |
| 1914 |
uint16_t len) |
| 1915 |
{ |
| 1916 |
REG_RD_DMAE(sc, offset, wb_write, len); |
| 1917 |
} |
| 1918 |
|
| 1919 |
uint8_t |
| 1920 |
elink_cb_path_id(struct bxe_softc *sc) |
| 1921 |
{ |
| 1922 |
return (SC_PATH(sc)); |
| 1923 |
} |
| 1924 |
|
| 1925 |
void |
| 1926 |
elink_cb_event_log(struct bxe_softc *sc, |
| 1927 |
const elink_log_id_t elink_log_id, |
| 1928 |
...) |
| 1929 |
{ |
| 1930 |
/* XXX */ |
| 1931 |
#if 0 |
| 1932 |
//va_list ap; |
| 1933 |
va_start(ap, elink_log_id); |
| 1934 |
_XXX_(sc, lm_log_id, ap); |
| 1935 |
va_end(ap); |
| 1936 |
#endif |
| 1937 |
BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); |
| 1938 |
} |
| 1939 |
|
| 1940 |
static int |
| 1941 |
bxe_set_spio(struct bxe_softc *sc, |
| 1942 |
int spio, |
| 1943 |
uint32_t mode) |
| 1944 |
{ |
| 1945 |
uint32_t spio_reg; |
| 1946 |
|
| 1947 |
/* Only 2 SPIOs are configurable */ |
| 1948 |
if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { |
| 1949 |
BLOGE(sc, "Invalid SPIO 0x%x\n", spio); |
| 1950 |
return (-1); |
| 1951 |
} |
| 1952 |
|
| 1953 |
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); |
| 1954 |
|
| 1955 |
/* read SPIO and mask except the float bits */ |
| 1956 |
spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); |
| 1957 |
|
| 1958 |
switch (mode) { |
| 1959 |
case MISC_SPIO_OUTPUT_LOW: |
| 1960 |
BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); |
| 1961 |
/* clear FLOAT and set CLR */ |
| 1962 |
spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); |
| 1963 |
spio_reg |= (spio << MISC_SPIO_CLR_POS); |
| 1964 |
break; |
| 1965 |
|
| 1966 |
case MISC_SPIO_OUTPUT_HIGH: |
| 1967 |
BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); |
| 1968 |
/* clear FLOAT and set SET */ |
| 1969 |
spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); |
| 1970 |
spio_reg |= (spio << MISC_SPIO_SET_POS); |
| 1971 |
break; |
| 1972 |
|
| 1973 |
case MISC_SPIO_INPUT_HI_Z: |
| 1974 |
BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); |
| 1975 |
/* set FLOAT */ |
| 1976 |
spio_reg |= (spio << MISC_SPIO_FLOAT_POS); |
| 1977 |
break; |
| 1978 |
|
| 1979 |
default: |
| 1980 |
break; |
| 1981 |
} |
| 1982 |
|
| 1983 |
REG_WR(sc, MISC_REG_SPIO, spio_reg); |
| 1984 |
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); |
| 1985 |
|
| 1986 |
return (0); |
| 1987 |
} |
| 1988 |
|
| 1989 |
static int |
| 1990 |
bxe_gpio_read(struct bxe_softc *sc, |
| 1991 |
int gpio_num, |
| 1992 |
uint8_t port) |
| 1993 |
{ |
| 1994 |
/* The GPIO should be swapped if swap register is set and active */ |
| 1995 |
int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && |
| 1996 |
REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); |
| 1997 |
int gpio_shift = (gpio_num + |
| 1998 |
(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); |
| 1999 |
uint32_t gpio_mask = (1 << gpio_shift); |
| 2000 |
uint32_t gpio_reg; |
| 2001 |
|
| 2002 |
if (gpio_num > MISC_REGISTERS_GPIO_3) { |
| 2003 |
BLOGE(sc, "Invalid GPIO %d\n", gpio_num); |
| 2004 |
return (-1); |
| 2005 |
} |
| 2006 |
|
| 2007 |
/* read GPIO value */ |
| 2008 |
gpio_reg = REG_RD(sc, MISC_REG_GPIO); |
| 2009 |
|
| 2010 |
/* get the requested pin value */ |
| 2011 |
return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; |
| 2012 |
} |
| 2013 |
|
| 2014 |
/*
 * Drive a single GPIO pin: output low, output high, or float (input).
 * The pin position is adjusted for port swapping via the NIG straps.
 *
 * Returns 0 on success, -1 for an invalid GPIO number.
 *
 * NOTE(review): an unrecognized 'mode' silently falls through and the
 * register is still rewritten with only the float bits preserved --
 * presumably intentional; confirm against the other GPIO helpers.
 */
static int
bxe_gpio_write(struct bxe_softc *sc,
               int              gpio_num,
               uint32_t         mode,
               uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
        return (-1);
    }

    /* serialize against other functions touching the GPIO block */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO and mask except the float bits */
    gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_OUTPUT_LOW:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear FLOAT and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
        break;

    case MISC_REGISTERS_GPIO_INPUT_HI_Z:
        BLOGD(sc, DBG_PHY,
              "Set GPIO %d (shift %d) -> input\n",
              gpio_num, gpio_shift);
        /* set FLOAT */
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
| 2074 |
|
| 2075 |
static int |
| 2076 |
bxe_gpio_mult_write(struct bxe_softc *sc, |
| 2077 |
uint8_t pins, |
| 2078 |
uint32_t mode) |
| 2079 |
{ |
| 2080 |
uint32_t gpio_reg; |
| 2081 |
|
| 2082 |
/* any port swapping should be handled by caller */ |
| 2083 |
|
| 2084 |
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); |
| 2085 |
|
| 2086 |
/* read GPIO and mask except the float bits */ |
| 2087 |
gpio_reg = REG_RD(sc, MISC_REG_GPIO); |
| 2088 |
gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); |
| 2089 |
gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); |
| 2090 |
gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); |
| 2091 |
|
| 2092 |
switch (mode) { |
| 2093 |
case MISC_REGISTERS_GPIO_OUTPUT_LOW: |
| 2094 |
BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); |
| 2095 |
/* set CLR */ |
| 2096 |
gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); |
| 2097 |
break; |
| 2098 |
|
| 2099 |
case MISC_REGISTERS_GPIO_OUTPUT_HIGH: |
| 2100 |
BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); |
| 2101 |
/* set SET */ |
| 2102 |
gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); |
| 2103 |
break; |
| 2104 |
|
| 2105 |
case MISC_REGISTERS_GPIO_INPUT_HI_Z: |
| 2106 |
BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); |
| 2107 |
/* set FLOAT */ |
| 2108 |
gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); |
| 2109 |
break; |
| 2110 |
|
| 2111 |
default: |
| 2112 |
BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); |
| 2113 |
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); |
| 2114 |
return (-1); |
| 2115 |
} |
| 2116 |
|
| 2117 |
REG_WR(sc, MISC_REG_GPIO, gpio_reg); |
| 2118 |
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); |
| 2119 |
|
| 2120 |
return (0); |
| 2121 |
} |
| 2122 |
|
| 2123 |
/*
 * Set or clear the interrupt latch for a single GPIO pin. The pin
 * position is adjusted for port swapping via the NIG straps.
 *
 * Returns 0 on success, -1 for an invalid GPIO number.
 *
 * NOTE(review): an unrecognized 'mode' falls through and rewrites the
 * GPIO_INT register with the value just read -- effectively a no-op;
 * confirm this is intended.
 */
static int
bxe_gpio_int_write(struct bxe_softc *sc,
                   int              gpio_num,
                   uint32_t         mode,
                   uint8_t          port)
{
    /* The GPIO should be swapped if swap register is set and active */
    int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) &&
                      REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port);
    int gpio_shift = (gpio_num +
                      (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0));
    uint32_t gpio_mask = (1 << gpio_shift);
    uint32_t gpio_reg;

    if (gpio_num > MISC_REGISTERS_GPIO_3) {
        BLOGE(sc, "Invalid GPIO %d\n", gpio_num);
        return (-1);
    }

    /* serialize against other functions touching the GPIO block */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    /* read GPIO int */
    gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT);

    switch (mode) {
    case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
        BLOGD(sc, DBG_PHY,
              "Clear GPIO INT %d (shift %d) -> output low\n",
              gpio_num, gpio_shift);
        /* clear SET and set CLR */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        break;

    case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
        BLOGD(sc, DBG_PHY,
              "Set GPIO INT %d (shift %d) -> output high\n",
              gpio_num, gpio_shift);
        /* clear CLR and set SET */
        gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
        gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
        break;

    default:
        break;
    }

    REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);

    return (0);
}
| 2175 |
|
| 2176 |
uint32_t |
| 2177 |
elink_cb_gpio_read(struct bxe_softc *sc, |
| 2178 |
uint16_t gpio_num, |
| 2179 |
uint8_t port) |
| 2180 |
{ |
| 2181 |
return (bxe_gpio_read(sc, gpio_num, port)); |
| 2182 |
} |
| 2183 |
|
| 2184 |
uint8_t |
| 2185 |
elink_cb_gpio_write(struct bxe_softc *sc, |
| 2186 |
uint16_t gpio_num, |
| 2187 |
uint8_t mode, /* 0=low 1=high */ |
| 2188 |
uint8_t port) |
| 2189 |
{ |
| 2190 |
return (bxe_gpio_write(sc, gpio_num, mode, port)); |
| 2191 |
} |
| 2192 |
|
| 2193 |
uint8_t |
| 2194 |
elink_cb_gpio_mult_write(struct bxe_softc *sc, |
| 2195 |
uint8_t pins, |
| 2196 |
uint8_t mode) /* 0=low 1=high */ |
| 2197 |
{ |
| 2198 |
return (bxe_gpio_mult_write(sc, pins, mode)); |
| 2199 |
} |
| 2200 |
|
| 2201 |
uint8_t |
| 2202 |
elink_cb_gpio_int_write(struct bxe_softc *sc, |
| 2203 |
uint16_t gpio_num, |
| 2204 |
uint8_t mode, /* 0=low 1=high */ |
| 2205 |
uint8_t port) |
| 2206 |
{ |
| 2207 |
return (bxe_gpio_int_write(sc, gpio_num, mode, port)); |
| 2208 |
} |
| 2209 |
|
| 2210 |
void |
| 2211 |
elink_cb_notify_link_changed(struct bxe_softc *sc) |
| 2212 |
{ |
| 2213 |
REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + |
| 2214 |
(SC_FUNC(sc) * sizeof(uint32_t))), 1); |
| 2215 |
} |
| 2216 |
|
| 2217 |
/* send the MCP a request, block until there is a reply */ |
| 2218 |
uint32_t |
| 2219 |
elink_cb_fw_command(struct bxe_softc *sc, |
| 2220 |
uint32_t command, |
| 2221 |
uint32_t param) |
| 2222 |
{ |
| 2223 |
int mb_idx = SC_FW_MB_IDX(sc); |
| 2224 |
uint32_t seq; |
| 2225 |
uint32_t rc = 0; |
| 2226 |
uint32_t cnt = 1; |
| 2227 |
uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; |
| 2228 |
|
| 2229 |
BXE_FWMB_LOCK(sc); |
| 2230 |
|
| 2231 |
seq = ++sc->fw_seq; |
| 2232 |
SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); |
| 2233 |
SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); |
| 2234 |
|
| 2235 |
BLOGD(sc, DBG_PHY, |
| 2236 |
"wrote command 0x%08x to FW MB param 0x%08x\n", |
| 2237 |
(command | seq), param); |
| 2238 |
|
| 2239 |
/* Let the FW do it's magic. GIve it up to 5 seconds... */ |
| 2240 |
do { |
| 2241 |
DELAY(delay * 1000); |
| 2242 |
rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); |
| 2243 |
} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); |
| 2244 |
|
| 2245 |
BLOGD(sc, DBG_PHY, |
| 2246 |
"[after %d ms] read 0x%x seq 0x%x from FW MB\n", |
| 2247 |
cnt*delay, rc, seq); |
| 2248 |
|
| 2249 |
/* is this a reply to our command? */ |
| 2250 |
if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { |
| 2251 |
rc &= FW_MSG_CODE_MASK; |
| 2252 |
} else { |
| 2253 |
/* Ruh-roh! */ |
| 2254 |
BLOGE(sc, "FW failed to respond!\n"); |
| 2255 |
// XXX bxe_fw_dump(sc); |
| 2256 |
rc = 0; |
| 2257 |
} |
| 2258 |
|
| 2259 |
BXE_FWMB_UNLOCK(sc); |
| 2260 |
return (rc); |
| 2261 |
} |
| 2262 |
|
| 2263 |
static uint32_t |
| 2264 |
bxe_fw_command(struct bxe_softc *sc, |
| 2265 |
uint32_t command, |
| 2266 |
uint32_t param) |
| 2267 |
{ |
| 2268 |
return (elink_cb_fw_command(sc, command, param)); |
| 2269 |
} |
| 2270 |
|
| 2271 |
static void |
| 2272 |
__storm_memset_dma_mapping(struct bxe_softc *sc, |
| 2273 |
uint32_t addr, |
| 2274 |
bus_addr_t mapping) |
| 2275 |
{ |
| 2276 |
REG_WR(sc, addr, U64_LO(mapping)); |
| 2277 |
REG_WR(sc, (addr + 4), U64_HI(mapping)); |
| 2278 |
} |
| 2279 |
|
| 2280 |
static void |
| 2281 |
storm_memset_spq_addr(struct bxe_softc *sc, |
| 2282 |
bus_addr_t mapping, |
| 2283 |
uint16_t abs_fid) |
| 2284 |
{ |
| 2285 |
uint32_t addr = (XSEM_REG_FAST_MEMORY + |
| 2286 |
XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); |
| 2287 |
__storm_memset_dma_mapping(sc, addr, mapping); |
| 2288 |
} |
| 2289 |
|
| 2290 |
static void |
| 2291 |
storm_memset_vf_to_pf(struct bxe_softc *sc, |
| 2292 |
uint16_t abs_fid, |
| 2293 |
uint16_t pf_id) |
| 2294 |
{ |
| 2295 |
REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); |
| 2296 |
REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); |
| 2297 |
REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); |
| 2298 |
REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); |
| 2299 |
} |
| 2300 |
|
| 2301 |
static void |
| 2302 |
storm_memset_func_en(struct bxe_softc *sc, |
| 2303 |
uint16_t abs_fid, |
| 2304 |
uint8_t enable) |
| 2305 |
{ |
| 2306 |
REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); |
| 2307 |
REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); |
| 2308 |
REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); |
| 2309 |
REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); |
| 2310 |
} |
| 2311 |
|
| 2312 |
static void |
| 2313 |
storm_memset_eq_data(struct bxe_softc *sc, |
| 2314 |
struct event_ring_data *eq_data, |
| 2315 |
uint16_t pfid) |
| 2316 |
{ |
| 2317 |
uint32_t addr; |
| 2318 |
size_t size; |
| 2319 |
|
| 2320 |
addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); |
| 2321 |
size = sizeof(struct event_ring_data); |
| 2322 |
ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); |
| 2323 |
} |
| 2324 |
|
| 2325 |
static void |
| 2326 |
storm_memset_eq_prod(struct bxe_softc *sc, |
| 2327 |
uint16_t eq_prod, |
| 2328 |
uint16_t pfid) |
| 2329 |
{ |
| 2330 |
uint32_t addr = (BAR_CSTRORM_INTMEM + |
| 2331 |
CSTORM_EVENT_RING_PROD_OFFSET(pfid)); |
| 2332 |
REG_WR16(sc, addr, eq_prod); |
| 2333 |
} |
| 2334 |
|
| 2335 |
/* |
| 2336 |
* Post a slowpath command. |
| 2337 |
* |
| 2338 |
 * A slowpath command is used to propagate a configuration change through
| 2339 |
* the controller in a controlled manner, allowing each STORM processor and |
| 2340 |
* other H/W blocks to phase in the change. The commands sent on the |
| 2341 |
* slowpath are referred to as ramrods. Depending on the ramrod used the |
| 2342 |
* completion of the ramrod will occur in different ways. Here's a |
| 2343 |
* breakdown of ramrods and how they complete: |
| 2344 |
* |
| 2345 |
* RAMROD_CMD_ID_ETH_PORT_SETUP |
| 2346 |
* Used to setup the leading connection on a port. Completes on the |
| 2347 |
* Receive Completion Queue (RCQ) of that port (typically fp[0]). |
| 2348 |
* |
| 2349 |
* RAMROD_CMD_ID_ETH_CLIENT_SETUP |
| 2350 |
* Used to setup an additional connection on a port. Completes on the |
| 2351 |
* RCQ of the multi-queue/RSS connection being initialized. |
| 2352 |
* |
| 2353 |
* RAMROD_CMD_ID_ETH_STAT_QUERY |
| 2354 |
* Used to force the storm processors to update the statistics database |
| 2355 |
 * in host memory. This ramrod is sent on the leading connection CID and
| 2356 |
* completes as an index increment of the CSTORM on the default status |
| 2357 |
* block. |
| 2358 |
* |
| 2359 |
* RAMROD_CMD_ID_ETH_UPDATE |
| 2360 |
 * Used to update the state of the leading connection, usually to update
| 2361 |
* the RSS indirection table. Completes on the RCQ of the leading |
| 2362 |
* connection. (Not currently used under FreeBSD until OS support becomes |
| 2363 |
* available.) |
| 2364 |
* |
| 2365 |
* RAMROD_CMD_ID_ETH_HALT |
| 2366 |
* Used when tearing down a connection prior to driver unload. Completes |
| 2367 |
* on the RCQ of the multi-queue/RSS connection being torn down. Don't |
| 2368 |
* use this on the leading connection. |
| 2369 |
* |
| 2370 |
* RAMROD_CMD_ID_ETH_SET_MAC |
| 2371 |
* Sets the Unicast/Broadcast/Multicast used by the port. Completes on |
| 2372 |
* the RCQ of the leading connection. |
| 2373 |
* |
| 2374 |
* RAMROD_CMD_ID_ETH_CFC_DEL |
| 2375 |
 * Used when tearing down a connection prior to driver unload. Completes
| 2376 |
* on the RCQ of the leading connection (since the current connection |
| 2377 |
* has been completely removed from controller memory). |
| 2378 |
* |
| 2379 |
* RAMROD_CMD_ID_ETH_PORT_DEL |
| 2380 |
* Used to tear down the leading connection prior to driver unload, |
| 2381 |
* typically fp[0]. Completes as an index increment of the CSTORM on the |
| 2382 |
* default status block. |
| 2383 |
* |
| 2384 |
* RAMROD_CMD_ID_ETH_FORWARD_SETUP |
| 2385 |
* Used for connection offload. Completes on the RCQ of the multi-queue |
| 2386 |
* RSS connection that is being offloaded. (Not currently used under |
| 2387 |
* FreeBSD.) |
| 2388 |
* |
| 2389 |
* There can only be one command pending per function. |
| 2390 |
* |
| 2391 |
* Returns: |
| 2392 |
* 0 = Success, !0 = Failure. |
| 2393 |
*/ |
| 2394 |
|
| 2395 |
/* must be called under the spq lock */ |
| 2396 |
static inline |
| 2397 |
struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) |
| 2398 |
{ |
| 2399 |
struct eth_spe *next_spe = sc->spq_prod_bd; |
| 2400 |
|
| 2401 |
if (sc->spq_prod_bd == sc->spq_last_bd) { |
| 2402 |
/* wrap back to the first eth_spq */ |
| 2403 |
sc->spq_prod_bd = sc->spq; |
| 2404 |
sc->spq_prod_idx = 0; |
| 2405 |
} else { |
| 2406 |
sc->spq_prod_bd++; |
| 2407 |
sc->spq_prod_idx++; |
| 2408 |
} |
| 2409 |
|
| 2410 |
return (next_spe); |
| 2411 |
} |
| 2412 |
|
| 2413 |
/*
 * Publish the new slowpath queue producer index to XSTORM memory.
 * Must be called under the spq lock.
 */
static inline
void bxe_sp_prod_update(struct bxe_softc *sc)
{
    int func = SC_FUNC(sc);

    /*
     * Make sure that BD data is updated before writing the producer.
     * BD data is written to the memory, the producer is read from the
     * memory, thus we need a full memory barrier to ensure the ordering.
     */
    mb();

    REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)),
             sc->spq_prod_idx);

    /* flush the producer write out through the BAR0 bus space */
    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
}
| 2432 |
|
| 2433 |
/** |
| 2434 |
* bxe_is_contextless_ramrod - check if the current command ends on EQ |
| 2435 |
* |
| 2436 |
* @cmd: command to check |
| 2437 |
* @cmd_type: command type |
| 2438 |
*/ |
| 2439 |
static inline |
| 2440 |
int bxe_is_contextless_ramrod(int cmd, |
| 2441 |
int cmd_type) |
| 2442 |
{ |
| 2443 |
if ((cmd_type == NONE_CONNECTION_TYPE) || |
| 2444 |
(cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || |
| 2445 |
(cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || |
| 2446 |
(cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || |
| 2447 |
(cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || |
| 2448 |
(cmd == RAMROD_CMD_ID_ETH_SET_MAC) || |
| 2449 |
(cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { |
| 2450 |
return (TRUE); |
| 2451 |
} else { |
| 2452 |
return (FALSE); |
| 2453 |
} |
| 2454 |
} |
| 2455 |
|
| 2456 |
/** |
| 2457 |
* bxe_sp_post - place a single command on an SP ring |
| 2458 |
* |
| 2459 |
* @sc: driver handle |
| 2460 |
* @command: command to place (e.g. SETUP, FILTER_RULES, etc.) |
| 2461 |
* @cid: SW CID the command is related to |
| 2462 |
* @data_hi: command private data address (high 32 bits) |
| 2463 |
* @data_lo: command private data address (low 32 bits) |
| 2464 |
* @cmd_type: command type (e.g. NONE, ETH) |
| 2465 |
* |
| 2466 |
* SP data is handled as if it's always an address pair, thus data fields are |
| 2467 |
* not swapped to little endian in upper functions. Instead this function swaps |
| 2468 |
* data as if it's two uint32 fields. |
| 2469 |
*/ |
| 2470 |
int
bxe_sp_post(struct bxe_softc *sc,
            int              command,
            int              cid,
            uint32_t         data_hi,
            uint32_t         data_lo,
            int              cmd_type)
{
    struct eth_spe *spe;
    uint16_t type;
    int common;

    /* contextless ramrods complete on the EQ, the rest on an RCQ */
    common = bxe_is_contextless_ramrod(command, cmd_type);

    BXE_SP_LOCK(sc);

    /* fail (-1) rather than block when the target ring has no room */
    if (common) {
        if (!atomic_load_acq_long(&sc->eq_spq_left)) {
            BLOGE(sc, "EQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    } else {
        if (!atomic_load_acq_long(&sc->cq_spq_left)) {
            BLOGE(sc, "SPQ ring is full!\n");
            BXE_SP_UNLOCK(sc);
            return (-1);
        }
    }

    /* grab the next producer entry (advances the producer index) */
    spe = bxe_sp_get_next(sc);

    /* CID needs port number to be encoded in it */
    spe->hdr.conn_and_cmd_data =
        htole32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(sc, cid));

    type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;

    /* TBD: Check if it works for VFs */
    type |= ((SC_FUNC(sc) << SPE_HDR_FUNCTION_ID_SHIFT) &
             SPE_HDR_FUNCTION_ID);

    spe->hdr.type = htole16(type);

    /* private data address, always written as a little-endian pair */
    spe->data.update_data_addr.hi = htole32(data_hi);
    spe->data.update_data_addr.lo = htole32(data_lo);

    /*
     * It's ok if the actual decrement is issued towards the memory
     * somewhere between the lock and unlock. Thus no more explicit
     * memory barrier is needed.
     */
    if (common) {
        atomic_subtract_acq_long(&sc->eq_spq_left, 1);
    } else {
        atomic_subtract_acq_long(&sc->cq_spq_left, 1);
    }

    BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
    BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
          BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata));
    BLOGD(sc, DBG_SP,
          "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n",
          sc->spq_prod_idx,
          (uint32_t)U64_HI(sc->spq_dma.paddr),
          (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
          command,
          common,
          HW_CID(sc, cid),
          data_hi,
          data_lo,
          type,
          atomic_load_acq_long(&sc->cq_spq_left),
          atomic_load_acq_long(&sc->eq_spq_left));

    /* tell the hardware about the new producer (includes barriers) */
    bxe_sp_prod_update(sc);

    BXE_SP_UNLOCK(sc);
    return (0);
}
| 2550 |
|
| 2551 |
/** |
| 2552 |
* bxe_debug_print_ind_table - prints the indirection table configuration. |
| 2553 |
* |
| 2554 |
 * @sc: driver handle
| 2555 |
* @p: pointer to rss configuration |
| 2556 |
*/ |
| 2557 |
/* Debug-only helper, currently compiled out. */
#if 0
static void
bxe_debug_print_ind_table(struct bxe_softc               *sc,
                          struct ecore_config_rss_params *p)
{
    int i;

    BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n");
    BLOGD(sc, DBG_LOAD, "    0x0000: ");
    for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
        BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]);

        /* Print 4 bytes in a line */
        if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
            (((i + 1) & 0x3) == 0)) {
            BLOGD(sc, DBG_LOAD, "\n");
            BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1);
        }
    }

    BLOGD(sc, DBG_LOAD, "\n");
}
#endif
| 2580 |
|
| 2581 |
/* |
| 2582 |
* FreeBSD Device probe function. |
| 2583 |
* |
| 2584 |
* Compares the device found to the driver's list of supported devices and |
| 2585 |
* reports back to the bsd loader whether this is the right driver for the device. |
| 2586 |
* This is the driver entry function called from the "kldload" command. |
| 2587 |
* |
| 2588 |
* Returns: |
| 2589 |
* BUS_PROBE_DEFAULT on success, positive value on failure. |
| 2590 |
*/ |
| 2591 |
static int |
| 2592 |
bxe_probe(device_t dev) |
| 2593 |
{ |
| 2594 |
struct bxe_softc *sc; |
| 2595 |
struct bxe_device_type *t; |
| 2596 |
char *descbuf; |
| 2597 |
uint16_t did, sdid, svid, vid; |
| 2598 |
|
| 2599 |
/* Find our device structure */ |
| 2600 |
sc = device_get_softc(dev); |
| 2601 |
sc->dev = dev; |
| 2602 |
t = bxe_devs; |
| 2603 |
|
| 2604 |
/* Get the data for the device to be probed. */ |
| 2605 |
vid = pci_get_vendor(dev); |
| 2606 |
did = pci_get_device(dev); |
| 2607 |
svid = pci_get_subvendor(dev); |
| 2608 |
sdid = pci_get_subdevice(dev); |
| 2609 |
|
| 2610 |
BLOGD(sc, DBG_LOAD, |
| 2611 |
"%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, " |
| 2612 |
"SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid); |
| 2613 |
|
| 2614 |
/* Look through the list of known devices for a match. */ |
| 2615 |
while (t->bxe_name != NULL) { |
| 2616 |
if ((vid == t->bxe_vid) && (did == t->bxe_did) && |
| 2617 |
((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && |
| 2618 |
((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { |
| 2619 |
descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); |
| 2620 |
if (descbuf == NULL) |
| 2621 |
return (ENOMEM); |
| 2622 |
|
| 2623 |
/* Print out the device identity. */ |
| 2624 |
snprintf(descbuf, BXE_DEVDESC_MAX, |
| 2625 |
"%s (%c%d) BXE v:%s\n", t->bxe_name, |
| 2626 |
(((pci_read_config(dev, PCIR_REVID, 4) & |
| 2627 |
0xf0) >> 4) + 'A'), |
| 2628 |
(pci_read_config(dev, PCIR_REVID, 4) & 0xf), |
| 2629 |
BXE_DRIVER_VERSION); |
| 2630 |
|
| 2631 |
device_set_desc_copy(dev, descbuf); |
| 2632 |
free(descbuf, M_TEMP); |
| 2633 |
return (BUS_PROBE_DEFAULT); |
| 2634 |
} |
| 2635 |
t++; |
| 2636 |
} |
| 2637 |
|
| 2638 |
return (ENXIO); |
| 2639 |
} |
| 2640 |
|
| 2641 |
static void |
| 2642 |
bxe_init_mutexes(struct bxe_softc *sc) |
| 2643 |
{ |
| 2644 |
#ifdef BXE_CORE_LOCK_SX |
| 2645 |
snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), |
| 2646 |
"bxe%d_core_lock", sc->unit); |
| 2647 |
sx_init(&sc->core_sx, sc->core_sx_name); |
| 2648 |
#else |
| 2649 |
snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), |
| 2650 |
"bxe%d_core_lock", sc->unit); |
| 2651 |
mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); |
| 2652 |
#endif |
| 2653 |
|
| 2654 |
snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), |
| 2655 |
"bxe%d_sp_lock", sc->unit); |
| 2656 |
mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); |
| 2657 |
|
| 2658 |
snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), |
| 2659 |
"bxe%d_dmae_lock", sc->unit); |
| 2660 |
mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); |
| 2661 |
|
| 2662 |
snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), |
| 2663 |
"bxe%d_phy_lock", sc->unit); |
| 2664 |
mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); |
| 2665 |
|
| 2666 |
snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), |
| 2667 |
"bxe%d_fwmb_lock", sc->unit); |
| 2668 |
mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); |
| 2669 |
|
| 2670 |
snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), |
| 2671 |
"bxe%d_print_lock", sc->unit); |
| 2672 |
mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); |
| 2673 |
|
| 2674 |
snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), |
| 2675 |
"bxe%d_stats_lock", sc->unit); |
| 2676 |
mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); |
| 2677 |
|
| 2678 |
snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), |
| 2679 |
"bxe%d_mcast_lock", sc->unit); |
| 2680 |
mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); |
| 2681 |
} |
| 2682 |
|
| 2683 |
static void |
| 2684 |
bxe_release_mutexes(struct bxe_softc *sc) |
| 2685 |
{ |
| 2686 |
#ifdef BXE_CORE_LOCK_SX |
| 2687 |
sx_destroy(&sc->core_sx); |
| 2688 |
#else |
| 2689 |
if (mtx_initialized(&sc->core_mtx)) { |
| 2690 |
mtx_destroy(&sc->core_mtx); |
| 2691 |
} |
| 2692 |
#endif |
| 2693 |
|
| 2694 |
if (mtx_initialized(&sc->sp_mtx)) { |
| 2695 |
mtx_destroy(&sc->sp_mtx); |
| 2696 |
} |
| 2697 |
|
| 2698 |
if (mtx_initialized(&sc->dmae_mtx)) { |
| 2699 |
mtx_destroy(&sc->dmae_mtx); |
| 2700 |
} |
| 2701 |
|
| 2702 |
if (mtx_initialized(&sc->port.phy_mtx)) { |
| 2703 |
mtx_destroy(&sc->port.phy_mtx); |
| 2704 |
} |
| 2705 |
|
| 2706 |
if (mtx_initialized(&sc->fwmb_mtx)) { |
| 2707 |
mtx_destroy(&sc->fwmb_mtx); |
| 2708 |
} |
| 2709 |
|
| 2710 |
if (mtx_initialized(&sc->print_mtx)) { |
| 2711 |
mtx_destroy(&sc->print_mtx); |
| 2712 |
} |
| 2713 |
|
| 2714 |
if (mtx_initialized(&sc->stats_mtx)) { |
| 2715 |
mtx_destroy(&sc->stats_mtx); |
| 2716 |
} |
| 2717 |
|
| 2718 |
if (mtx_initialized(&sc->mcast_mtx)) { |
| 2719 |
mtx_destroy(&sc->mcast_mtx); |
| 2720 |
} |
| 2721 |
} |
| 2722 |
|
| 2723 |
static void |
| 2724 |
bxe_tx_disable(struct bxe_softc* sc) |
| 2725 |
{ |
| 2726 |
if_t ifp = sc->ifp; |
| 2727 |
|
| 2728 |
/* tell the stack the driver is stopped and TX queue is full */ |
| 2729 |
if (ifp != NULL) { |
| 2730 |
if_setdrvflags(ifp, 0); |
| 2731 |
} |
| 2732 |
} |
| 2733 |
|
| 2734 |
/*
 * Write the driver's pulse sequence number into the function mailbox in
 * shared memory so the management firmware can see the driver is alive.
 */
static void
bxe_drv_pulse(struct bxe_softc *sc)
{
    SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb,
             sc->fw_drv_pulse_wr_seq);
}

static inline uint16_t |
| 2742 |
bxe_tx_avail(struct bxe_softc *sc, |
| 2743 |
struct bxe_fastpath *fp) |
| 2744 |
{ |
| 2745 |
int16_t used; |
| 2746 |
uint16_t prod; |
| 2747 |
uint16_t cons; |
| 2748 |
|
| 2749 |
prod = fp->tx_bd_prod; |
| 2750 |
cons = fp->tx_bd_cons; |
| 2751 |
|
| 2752 |
used = SUB_S16(prod, cons); |
| 2753 |
|
| 2754 |
#if 0 |
| 2755 |
KASSERT((used < 0), ("used tx bds < 0")); |
| 2756 |
KASSERT((used > sc->tx_ring_size), ("used tx bds > tx_ring_size")); |
| 2757 |
KASSERT(((sc->tx_ring_size - used) > MAX_TX_AVAIL), |
| 2758 |
("invalid number of tx bds used")); |
| 2759 |
#endif |
| 2760 |
|
| 2761 |
return (int16_t)(sc->tx_ring_size) - used; |
| 2762 |
} |
| 2763 |
|
| 2764 |
static inline int |
| 2765 |
bxe_tx_queue_has_work(struct bxe_fastpath *fp) |
| 2766 |
{ |
| 2767 |
uint16_t hw_cons; |
| 2768 |
|
| 2769 |
mb(); /* status block fields can change */ |
| 2770 |
hw_cons = le16toh(*fp->tx_cons_sb); |
| 2771 |
return (hw_cons != fp->tx_pkt_cons); |
| 2772 |
} |
| 2773 |
|
| 2774 |
static inline uint8_t |
| 2775 |
bxe_has_tx_work(struct bxe_fastpath *fp) |
| 2776 |
{ |
| 2777 |
/* expand this for multi-cos if ever supported */ |
| 2778 |
return (bxe_tx_queue_has_work(fp)) ? TRUE : FALSE; |
| 2779 |
} |
| 2780 |
|
| 2781 |
static inline int |
| 2782 |
bxe_has_rx_work(struct bxe_fastpath *fp) |
| 2783 |
{ |
| 2784 |
uint16_t rx_cq_cons_sb; |
| 2785 |
|
| 2786 |
mb(); /* status block fields can change */ |
| 2787 |
rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); |
| 2788 |
if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) |
| 2789 |
rx_cq_cons_sb++; |
| 2790 |
return (fp->rx_cq_cons != rx_cq_cons_sb); |
| 2791 |
} |
| 2792 |
|
| 2793 |
static void |
| 2794 |
bxe_sp_event(struct bxe_softc *sc, |
| 2795 |
struct bxe_fastpath *fp, |
| 2796 |
union eth_rx_cqe *rr_cqe) |
| 2797 |
{ |
| 2798 |
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
| 2799 |
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); |
| 2800 |
enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; |
| 2801 |
struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; |
| 2802 |
|
| 2803 |
BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", |
| 2804 |
fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); |
| 2805 |
|
| 2806 |
#if 0 |
| 2807 |
/* |
| 2808 |
* If cid is within VF range, replace the slowpath object with the |
| 2809 |
* one corresponding to this VF |
| 2810 |
*/ |
| 2811 |
if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { |
| 2812 |
bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); |
| 2813 |
} |
| 2814 |
#endif |
| 2815 |
|
| 2816 |
switch (command) { |
| 2817 |
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): |
| 2818 |
BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); |
| 2819 |
drv_cmd = ECORE_Q_CMD_UPDATE; |
| 2820 |
break; |
| 2821 |
|
| 2822 |
case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): |
| 2823 |
BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); |
| 2824 |
drv_cmd = ECORE_Q_CMD_SETUP; |
| 2825 |
break; |
| 2826 |
|
| 2827 |
case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): |
| 2828 |
BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); |
| 2829 |
drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; |
| 2830 |
break; |
| 2831 |
|
| 2832 |
case (RAMROD_CMD_ID_ETH_HALT): |
| 2833 |
BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); |
| 2834 |
drv_cmd = ECORE_Q_CMD_HALT; |
| 2835 |
break; |
| 2836 |
|
| 2837 |
case (RAMROD_CMD_ID_ETH_TERMINATE): |
| 2838 |
BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid); |
| 2839 |
drv_cmd = ECORE_Q_CMD_TERMINATE; |
| 2840 |
break; |
| 2841 |
|
| 2842 |
case (RAMROD_CMD_ID_ETH_EMPTY): |
| 2843 |
BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); |
| 2844 |
drv_cmd = ECORE_Q_CMD_EMPTY; |
| 2845 |
break; |
| 2846 |
|
| 2847 |
default: |
| 2848 |
BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", |
| 2849 |
command, fp->index); |
| 2850 |
return; |
| 2851 |
} |
| 2852 |
|
| 2853 |
if ((drv_cmd != ECORE_Q_CMD_MAX) && |
| 2854 |
q_obj->complete_cmd(sc, q_obj, drv_cmd)) { |
| 2855 |
/* |
| 2856 |
* q_obj->complete_cmd() failure means that this was |
| 2857 |
* an unexpected completion. |
| 2858 |
* |
| 2859 |
* In this case we don't want to increase the sc->spq_left |
| 2860 |
* because apparently we haven't sent this command the first |
| 2861 |
* place. |
| 2862 |
*/ |
| 2863 |
// bxe_panic(sc, ("Unexpected SP completion\n")); |
| 2864 |
return; |
| 2865 |
} |
| 2866 |
|
| 2867 |
#if 0 |
| 2868 |
/* SRIOV: reschedule any 'in_progress' operations */ |
| 2869 |
bxe_iov_sp_event(sc, cid, TRUE); |
| 2870 |
#endif |
| 2871 |
|
| 2872 |
atomic_add_acq_long(&sc->cq_spq_left, 1); |
| 2873 |
|
| 2874 |
BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", |
| 2875 |
atomic_load_acq_long(&sc->cq_spq_left)); |
| 2876 |
|
| 2877 |
#if 0 |
| 2878 |
if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && |
| 2879 |
(!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { |
| 2880 |
/* |
| 2881 |
* If Queue update ramrod is completed for last Queue in AFEX VIF set |
| 2882 |
* flow, then ACK MCP at the end. Mark pending ACK to MCP bit to |
| 2883 |
* prevent case that both bits are cleared. At the end of load/unload |
| 2884 |
* driver checks that sp_state is cleared and this order prevents |
| 2885 |
* races. |
| 2886 |
*/ |
| 2887 |
bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); |
| 2888 |
wmb(); |
| 2889 |
bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); |
| 2890 |
|
| 2891 |
/* schedule the sp task as MCP ack is required */ |
| 2892 |
bxe_schedule_sp_task(sc); |
| 2893 |
} |
| 2894 |
#endif |
| 2895 |
} |
| 2896 |
|
| 2897 |
/*
 * The current mbuf is part of an aggregation. Move the mbuf into the TPA
 * aggregation queue, put an empty mbuf back onto the receive chain, and mark
 * the current aggregation queue as in-progress.
 *
 * 'cons'/'prod' are the RX BD consumer/producer indices for the CQE that
 * started the aggregation; 'queue' selects the per-fastpath TPA slot.
 */
static void
bxe_tpa_start(struct bxe_softc            *sc,
              struct bxe_fastpath         *fp,
              uint16_t                    queue,
              uint16_t                    cons,
              uint16_t                    prod,
              struct eth_fast_path_rx_cqe *cqe)
{
    struct bxe_sw_rx_bd tmp_bd;
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    int max_agg_queues;
    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
    uint16_t index;

    BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START "
          "cons=%d prod=%d\n",
          fp->index, queue, cons, prod);

    max_agg_queues = MAX_AGG_QS(sc);

    KASSERT((queue < max_agg_queues),
            ("fp[%02d] invalid aggr queue (%d >= %d)!",
             fp->index, queue, max_agg_queues));

    /* a new aggregation may only start on a slot that was stopped */
    KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
            ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!",
             fp->index, queue));

    /* copy the existing mbuf and mapping from the TPA pool */
    tmp_bd = tpa_info->bd;

    if (tmp_bd.m == NULL) {
        BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n",
              fp->index, queue);
        /* XXX Error handling? */
        return;
    }

    /* change the TPA queue to the start state */
    tpa_info->state            = BXE_TPA_STATE_START;
    tpa_info->placement_offset = cqe->placement_offset;
    tpa_info->parsing_flags    = le16toh(cqe->pars_flags.flags);
    tpa_info->vlan_tag         = le16toh(cqe->vlan_tag);
    tpa_info->len_on_bd        = le16toh(cqe->len_on_bd);

    /* remember this slot is busy so bxe_tpa_stop() can clear it */
    fp->rx_tpa_queue_used |= (1 << queue);

    /*
     * If all the buffer descriptors are filled with mbufs then fill in
     * the current consumer index with a new BD. Else if a maximum Rx
     * buffer limit is imposed then fill in the next producer index.
     */
    index = (sc->max_rx_bufs != RX_BD_USABLE) ?
                prod : cons;

    /* move the received mbuf and mapping to TPA pool */
    tpa_info->bd = fp->rx_mbuf_chain[cons];

    /* release any existing RX BD mbuf mappings */
    if (cons != index) {
        rx_buf = &fp->rx_mbuf_chain[cons];

        if (rx_buf->m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
        }

        /*
         * We get here when the maximum number of rx buffers is less than
         * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL
         * it out here without concern of a memory leak.
         */
        fp->rx_mbuf_chain[cons].m = NULL;
    }

    /* update the Rx SW BD with the mbuf info from the TPA pool */
    fp->rx_mbuf_chain[index] = tmp_bd;

    /* update the Rx BD with the empty mbuf phys address from the TPA pool */
    rx_bd = &fp->rx_chain[index];
    rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
    rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
}

/*
 * When a TPA aggregation is completed, loop through the individual mbufs
 * of the aggregation, combining them into a single mbuf which will be sent
 * up the stack. Refill all freed SGEs with mbufs as we go along.
 *
 * Returns 0 on success or an errno; on failure the remaining SGEs are left
 * in the ring and the caller is expected to drop the head mbuf 'm'.
 */
static int
bxe_fill_frag_mbuf(struct bxe_softc          *sc,
                   struct bxe_fastpath       *fp,
                   struct bxe_sw_tpa_info    *tpa_info,
                   uint16_t                  queue,
                   uint16_t                  pages,
                   struct mbuf               *m,
                   struct eth_end_agg_rx_cqe *cqe,
                   uint16_t                  cqe_idx)
{
    struct mbuf *m_frag;
    uint32_t frag_len, frag_size, i;
    uint16_t sge_idx;
    int rc = 0;
    int j;

    /* bytes still to be gathered beyond what is already on the head BD */
    frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n",
          fp->index, queue, tpa_info->len_on_bd, frag_size, pages);

    /* make sure the aggregated frame is not too big to handle */
    if (pages > 8 * PAGES_PER_SGE) {
        BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! "
              "pkt_len=%d len_on_bd=%d frag_size=%d\n",
              fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
              tpa_info->len_on_bd, frag_size);
        bxe_panic(sc, ("sge page count error\n"));
        return (EINVAL);
    }

    /*
     * Scan through the scatter gather list pulling individual mbufs into a
     * single mbuf for the host stack.
     */
    for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
        sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));

        /*
         * Firmware gives the indices of the SGE as if the ring is an array
         * (meaning that the "next" element will consume 2 indices).
         */
        frag_len = min(frag_size, (uint32_t)(SGE_PAGES));

        BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d "
              "sge_idx=%d frag_size=%d frag_len=%d\n",
              fp->index, queue, i, j, sge_idx, frag_size, frag_len);

        m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;

        /* allocate a new mbuf for the SGE before consuming the old one */
        rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx);
        if (rc) {
            /* Leave all remaining SGEs in the ring! */
            return (rc);
        }

        /* update the fragment length */
        m_frag->m_len = frag_len;

        /* concatenate the fragment to the head mbuf */
        m_cat(m, m_frag);
        fp->eth_q_stats.mbuf_alloc_sge--;

        /* update the TPA mbuf size and remaining fragment size */
        m->m_pkthdr.len += frag_len;
        frag_size -= frag_len;
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n",
          fp->index, queue, frag_size);

    return (rc);
}

static inline void |
| 3071 |
bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) |
| 3072 |
{ |
| 3073 |
int i, j; |
| 3074 |
|
| 3075 |
for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { |
| 3076 |
int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; |
| 3077 |
|
| 3078 |
for (j = 0; j < 2; j++) { |
| 3079 |
BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); |
| 3080 |
idx--; |
| 3081 |
} |
| 3082 |
} |
| 3083 |
} |
| 3084 |
|
| 3085 |
static inline void |
| 3086 |
bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) |
| 3087 |
{ |
| 3088 |
/* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ |
| 3089 |
memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); |
| 3090 |
|
| 3091 |
/* |
| 3092 |
* Clear the two last indices in the page to 1. These are the indices that |
| 3093 |
* correspond to the "next" element, hence will never be indicated and |
| 3094 |
* should be removed from the calculations. |
| 3095 |
*/ |
| 3096 |
bxe_clear_sge_mask_next_elems(fp); |
| 3097 |
} |
| 3098 |
|
| 3099 |
static inline void |
| 3100 |
bxe_update_last_max_sge(struct bxe_fastpath *fp, |
| 3101 |
uint16_t idx) |
| 3102 |
{ |
| 3103 |
uint16_t last_max = fp->last_max_sge; |
| 3104 |
|
| 3105 |
if (SUB_S16(idx, last_max) > 0) { |
| 3106 |
fp->last_max_sge = idx; |
| 3107 |
} |
| 3108 |
} |
| 3109 |
|
| 3110 |
/*
 * After a TPA aggregation completes, retire the SGEs it consumed and push
 * the SGE producer forward over every fully-consumed 64-bit mask element.
 * 'sge_len' is the number of SGL entries reported in the end-aggregation
 * CQE; zero means there is nothing to do.
 */
static inline void
bxe_update_sge_prod(struct bxe_softc          *sc,
                    struct bxe_fastpath       *fp,
                    uint16_t                  sge_len,
                    struct eth_end_agg_rx_cqe *cqe)
{
    uint16_t last_max, last_elem, first_elem;
    uint16_t delta = 0;
    uint16_t i;

    if (!sge_len) {
        return;
    }

    /* first mark all used pages */
    for (i = 0; i < sge_len; i++) {
        BIT_VEC64_CLEAR_BIT(fp->sge_mask,
                            RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[i])));
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp_cqe->sgl[%d] = %d\n",
          fp->index, sge_len - 1,
          le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

    /* assume that the last SGE index is the biggest */
    bxe_update_last_max_sge(fp,
                            le16toh(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

    last_max = RX_SGE(fp->last_max_sge);
    last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
    first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

    /* if ring is not full */
    if (last_elem + 1 != first_elem) {
        last_elem++;
    }

    /*
     * Now update the prod: walk mask elements from the current producer,
     * stopping at the first element that still has outstanding SGEs
     * (non-zero mask). Each fully-consumed element is re-armed to all-ones
     * and contributes BIT_VEC64_ELEM_SZ slots to the producer advance.
     */
    for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) {
        if (__predict_true(fp->sge_mask[i])) {
            break;
        }

        fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
        delta += BIT_VEC64_ELEM_SZ;
    }

    if (delta > 0) {
        fp->rx_sge_prod += delta;
        /* clear page-end entries */
        bxe_clear_sge_mask_next_elems(fp);
    }

    BLOGD(sc, DBG_LRO,
          "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
          fp->index, fp->last_max_sge, fp->rx_sge_prod);
}

/*
 * The aggregation on the current TPA queue has completed. Pull the individual
 * mbuf fragments together into a single mbuf, perform all necessary checksum
 * calculations, and send the resulting mbuf to the stack.
 *
 * On any failure the frame is dropped and rx_soft_errors is bumped; in all
 * cases the TPA slot is returned to the STOP state before returning.
 */
static void
bxe_tpa_stop(struct bxe_softc          *sc,
             struct bxe_fastpath       *fp,
             struct bxe_sw_tpa_info    *tpa_info,
             uint16_t                  queue,
             uint16_t                  pages,
             struct eth_end_agg_rx_cqe *cqe,
             uint16_t                  cqe_idx)
{
    if_t ifp = sc->ifp;
    struct mbuf *m;
    int rc = 0;

    BLOGD(sc, DBG_LRO,
          "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n",
          fp->index, queue, tpa_info->placement_offset,
          le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);

    m = tpa_info->bd.m;

    /* allocate a replacement before modifying existing mbuf */
    rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
    if (rc) {
        /* drop the frame and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        goto bxe_tpa_stop_exit;
    }

    /* we have a replacement, fixup the current mbuf */
    m_adj(m, tpa_info->placement_offset);
    m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;

    /* mark the checksums valid (taken care of by the firmware) */
    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
    m->m_pkthdr.csum_data = 0xffff;
    m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
                               CSUM_IP_VALID   |
                               CSUM_DATA_VALID |
                               CSUM_PSEUDO_HDR);

    /* aggregate all of the SGEs into a single mbuf */
    rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
    if (rc) {
        /* drop the packet and log an error */
        fp->eth_q_stats.rx_soft_errors++;
        m_freem(m);
    } else {
        if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) {
            m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

        /* assign packet to this interface */
        if_setrcvif(m, ifp);

#if __FreeBSD_version >= 800000
        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        m->m_flags |= M_FLOWID;
#endif

        if_incipackets(ifp, 1);
        fp->eth_q_stats.rx_tpa_pkts++;

        /* pass the frame to the stack */
        if_input(ifp, m);
    }

    /* we passed an mbuf up the stack or dropped the frame */
    fp->eth_q_stats.mbuf_alloc_tpa--;

bxe_tpa_stop_exit:

    /* return the TPA slot regardless of the outcome above */
    fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
    fp->rx_tpa_queue_used &= ~(1 << queue);
}

/*
 * Service the RX completion queue of one fastpath: walk CQEs from the
 * software consumer up to the hardware consumer, dispatching slowpath
 * events, TPA start/stop aggregation CQEs, and regular frames (which are
 * checksum-annotated and handed to the stack). Runs under the fastpath
 * RX lock and is bounded by sc->rx_budget per invocation.
 *
 * Returns non-zero if completions remain (budget reached before the
 * hardware consumer was caught up).
 */
static uint8_t
bxe_rxeof(struct bxe_softc    *sc,
          struct bxe_fastpath *fp)
{
    if_t ifp = sc->ifp;
    uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
    uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
    int rx_pkts = 0;
    int rc;

    BXE_FP_RX_LOCK(fp);

    /* CQ "next element" is of the size of the regular element */
    hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
    if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) {
        hw_cq_cons++;
    }

    bd_cons = fp->rx_bd_cons;
    bd_prod = fp->rx_bd_prod;
    bd_prod_fw = bd_prod;
    sw_cq_cons = fp->rx_cq_cons;
    sw_cq_prod = fp->rx_cq_prod;

    /*
     * Memory barrier necessary as speculative reads of the rx
     * buffer can be ahead of the index in the status block
     */
    rmb();

    BLOGD(sc, DBG_RX,
          "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n",
          fp->index, hw_cq_cons, sw_cq_cons);

    while (sw_cq_cons != hw_cq_cons) {
        struct bxe_sw_rx_bd *rx_buf = NULL;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_cqe *cqe_fp;
        uint8_t cqe_fp_flags;
        enum eth_rx_cqe_type cqe_fp_type;
        uint16_t len, pad;
        struct mbuf *m = NULL;

        comp_ring_cons = RCQ(sw_cq_cons);
        bd_prod = RX_BD(bd_prod);
        bd_cons = RX_BD(bd_cons);

        cqe = &fp->rcq_chain[comp_ring_cons];
        cqe_fp = &cqe->fast_path_cqe;
        cqe_fp_flags = cqe_fp->type_error_flags;
        cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

        BLOGD(sc, DBG_RX,
              "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d "
              "BD prod=%d cons=%d CQE type=0x%x err=0x%x "
              "status=0x%x rss_hash=0x%x vlan=0x%x len=%u\n",
              fp->index,
              hw_cq_cons,
              sw_cq_cons,
              bd_prod,
              bd_cons,
              CQE_TYPE(cqe_fp_flags),
              cqe_fp_flags,
              cqe_fp->status_flags,
              le32toh(cqe_fp->rss_hash_result),
              le16toh(cqe_fp->vlan_tag),
              le16toh(cqe_fp->pkt_len_or_gro_seg_len));

        /* is this a slowpath msg? */
        if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) {
            bxe_sp_event(sc, fp, cqe);
            goto next_cqe;
        }

        rx_buf = &fp->rx_mbuf_chain[bd_cons];

        /* non-fast CQE types are TPA aggregation start/stop events */
        if (!CQE_TYPE_FAST(cqe_fp_type)) {
            struct bxe_sw_tpa_info *tpa_info;
            uint16_t frag_size, pages;
            uint8_t queue;

#if 0
            /* sanity check */
            if (!fp->tpa_enable &&
                (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) {
                BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n",
                      CQE_TYPE(cqe_fp_type));
            }
#endif

            if (CQE_TYPE_START(cqe_fp_type)) {
                bxe_tpa_start(sc, fp, cqe_fp->queue_index,
                              bd_cons, bd_prod, cqe_fp);
                m = NULL; /* packet not ready yet */
                goto next_rx;
            }

            KASSERT(CQE_TYPE_STOP(cqe_fp_type),
                    ("CQE type is not STOP! (0x%x)\n", cqe_fp_type));

            queue = cqe->end_agg_cqe.queue_index;
            tpa_info = &fp->rx_tpa_info[queue];

            BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n",
                  fp->index, queue);

            frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
                         tpa_info->len_on_bd);
            pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

            bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
                         &cqe->end_agg_cqe, comp_ring_cons);

            bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe);

            goto next_cqe;
        }

        /* non TPA */

        /* is this an error packet? */
        if (__predict_false(cqe_fp_flags &
                            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
            BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons);
            fp->eth_q_stats.rx_soft_errors++;
            goto next_rx;
        }

        len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
        pad = cqe_fp->placement_offset;

        m = rx_buf->m;

        if (__predict_false(m == NULL)) {
            BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n",
                  bd_cons, fp->index);
            goto next_rx;
        }

        /* XXX double copy if packet length under a threshold */

        /*
         * If all the buffer descriptors are filled with mbufs then fill in
         * the current consumer index with a new BD. Else if a maximum Rx
         * buffer limit is imposed then fill in the next producer index.
         */
        rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons,
                                  (sc->max_rx_bufs != RX_BD_USABLE) ?
                                      bd_prod : bd_cons);
        if (rc != 0) {
            BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
                  fp->index, rc);
            fp->eth_q_stats.rx_soft_errors++;

            if (sc->max_rx_bufs != RX_BD_USABLE) {
                /* copy this consumer index to the producer index */
                memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
                       sizeof(struct bxe_sw_rx_bd));
                memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd));
            }

            goto next_rx;
        }

        /* current mbuf was detached from the bd */
        fp->eth_q_stats.mbuf_alloc_rx--;

        /* we allocated a replacement mbuf, fixup the current one */
        m_adj(m, pad);
        m->m_pkthdr.len = m->m_len = len;

        /* assign packet to this interface */
        if_setrcvif(m, ifp);

        /* assume no hardware checksum has completed */
        m->m_pkthdr.csum_flags = 0;

        /* validate checksum if offload enabled */
        if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
            /* check for a valid IP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_ip++;
                    m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                }
            }

            /* check for a valid TCP/UDP frame */
            if (!(cqe->fast_path_cqe.status_flags &
                  ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) {
                if (__predict_false(cqe_fp_flags &
                                    ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) {
                    fp->eth_q_stats.rx_hw_csum_errors++;
                } else {
                    fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
                    m->m_pkthdr.csum_data = 0xFFFF;
                    m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
                                               CSUM_PSEUDO_HDR);
                }
            }
        }

        /* if there is a VLAN tag then flag that info */
        if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_VLAN) {
            m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
            m->m_flags |= M_VLANTAG;
        }

#if __FreeBSD_version >= 800000
        /* specify what RSS queue was used for this flow */
        m->m_pkthdr.flowid = fp->index;
        m->m_flags |= M_FLOWID;
#endif

next_rx:

        bd_cons = RX_BD_NEXT(bd_cons);
        bd_prod = RX_BD_NEXT(bd_prod);
        bd_prod_fw = RX_BD_NEXT(bd_prod_fw);

        /* pass the frame to the stack */
        if (__predict_true(m != NULL)) {
            if_incipackets(ifp, 1);
            rx_pkts++;
            if_input(ifp, m);
        }

next_cqe:

        sw_cq_prod = RCQ_NEXT(sw_cq_prod);
        sw_cq_cons = RCQ_NEXT(sw_cq_cons);

        /* limit spinning on the queue */
        if (rx_pkts == sc->rx_budget) {
            fp->eth_q_stats.rx_budget_reached++;
            break;
        }
    } /* while work to do */

    fp->rx_bd_cons = bd_cons;
    fp->rx_bd_prod = bd_prod_fw;
    fp->rx_cq_cons = sw_cq_cons;
    fp->rx_cq_prod = sw_cq_prod;

    /* Update producers */
    bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);

    fp->eth_q_stats.rx_pkts += rx_pkts;
    fp->eth_q_stats.rx_calls++;

    BXE_FP_RX_UNLOCK(fp);

    return (sw_cq_cons != hw_cq_cons);
}

/*
 * Release one completed TX packet: unload its DMA mapping, free the mbuf,
 * and compute the new TX BD consumer index by skipping all of the BDs
 * ('nbd', from the start BD) that the packet consumed.
 *
 * Returns the new (unwrapped) consumer index.
 */
static uint16_t
bxe_free_tx_pkt(struct bxe_softc    *sc,
                struct bxe_fastpath *fp,
                uint16_t            idx)
{
    struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
    struct eth_tx_start_bd *tx_start_bd;
    uint16_t bd_idx = TX_BD(tx_buf->first_bd);
    uint16_t new_cons;
    int nbd;

    /* unmap the mbuf from non-paged memory */
    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);

    tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
    nbd = le16toh(tx_start_bd->nbd) - 1;

#if 0
    if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) {
        bxe_panic(sc, ("BAD nbd!\n"));
    }
#endif

    new_cons = (tx_buf->first_bd + nbd);

#if 0
    struct eth_tx_bd *tx_data_bd;

    /*
     * The following code doesn't do anything but is left here
     * for clarity on what the new value of new_cons skipped.
     */

    /* get the next bd */
    bd_idx = TX_BD(TX_BD_NEXT(bd_idx));

    /* skip the parse bd */
    --nbd;
    bd_idx = TX_BD(TX_BD_NEXT(bd_idx));

    /* skip the TSO split header bd since they have no mapping */
    if (tx_buf->flags & BXE_TSO_SPLIT_BD) {
        --nbd;
        bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
    }

    /* now free frags */
    while (nbd > 0) {
        tx_data_bd = &fp->tx_chain[bd_idx].reg_bd;
        if (--nbd) {
            bd_idx = TX_BD(TX_BD_NEXT(bd_idx));
        }
    }
#endif

    /* free the mbuf */
    if (__predict_true(tx_buf->m != NULL)) {
        m_freem(tx_buf->m);
        fp->eth_q_stats.mbuf_alloc_tx--;
    } else {
        /* the slot had no mbuf; account for the lost one */
        fp->eth_q_stats.tx_chain_lost_mbuf++;
    }

    /* clear the SW BD so the slot can be reused */
    tx_buf->m = NULL;
    tx_buf->first_bd = 0;

    return (new_cons);
}

/* transmit timeout watchdog */ |
| 3582 |
static int |
| 3583 |
bxe_watchdog(struct bxe_softc *sc, |
| 3584 |
struct bxe_fastpath *fp) |
| 3585 |
{ |
| 3586 |
BXE_FP_TX_LOCK(fp); |
| 3587 |
|
| 3588 |
if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { |
| 3589 |
BXE_FP_TX_UNLOCK(fp); |
| 3590 |
return (0); |
| 3591 |
} |
| 3592 |
|
| 3593 |
BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); |
| 3594 |
|
| 3595 |
BXE_FP_TX_UNLOCK(fp); |
| 3596 |
|
| 3597 |
atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT); |
| 3598 |
taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task); |
| 3599 |
|
| 3600 |
return (-1); |
| 3601 |
} |
| 3602 |
|
| 3603 |
/* processes transmit completions */ |
| 3604 |
static uint8_t |
| 3605 |
bxe_txeof(struct bxe_softc *sc, |
| 3606 |
struct bxe_fastpath *fp) |
| 3607 |
{ |
| 3608 |
if_t ifp = sc->ifp; |
| 3609 |
uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; |
| 3610 |
uint16_t tx_bd_avail; |
| 3611 |
|
| 3612 |
BXE_FP_TX_LOCK_ASSERT(fp); |
| 3613 |
|
| 3614 |
bd_cons = fp->tx_bd_cons; |
| 3615 |
hw_cons = le16toh(*fp->tx_cons_sb); |
| 3616 |
sw_cons = fp->tx_pkt_cons; |
| 3617 |
|
| 3618 |
while (sw_cons != hw_cons) { |
| 3619 |
pkt_cons = TX_BD(sw_cons); |
| 3620 |
|
| 3621 |
BLOGD(sc, DBG_TX, |
| 3622 |
"TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", |
| 3623 |
fp->index, hw_cons, sw_cons, pkt_cons); |
| 3624 |
|
| 3625 |
bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); |
| 3626 |
|
| 3627 |
sw_cons++; |
| 3628 |
} |
| 3629 |
|
| 3630 |
fp->tx_pkt_cons = sw_cons; |
| 3631 |
fp->tx_bd_cons = bd_cons; |
| 3632 |
|
| 3633 |
BLOGD(sc, DBG_TX, |
| 3634 |
"TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", |
| 3635 |
fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); |
| 3636 |
|
| 3637 |
mb(); |
| 3638 |
|
| 3639 |
tx_bd_avail = bxe_tx_avail(sc, fp); |
| 3640 |
|
| 3641 |
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { |
| 3642 |
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); |
| 3643 |
} else { |
| 3644 |
if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); |
| 3645 |
} |
| 3646 |
|
| 3647 |
if (fp->tx_pkt_prod != fp->tx_pkt_cons) { |
| 3648 |
/* reset the watchdog timer if there are pending transmits */ |
| 3649 |
fp->watchdog_timer = BXE_TX_TIMEOUT; |
| 3650 |
return (TRUE); |
| 3651 |
} else { |
| 3652 |
/* clear watchdog when there are no pending transmits */ |
| 3653 |
fp->watchdog_timer = 0; |
| 3654 |
return (FALSE); |
| 3655 |
} |
| 3656 |
} |
| 3657 |
|
| 3658 |
static void |
| 3659 |
bxe_drain_tx_queues(struct bxe_softc *sc) |
| 3660 |
{ |
| 3661 |
struct bxe_fastpath *fp; |
| 3662 |
int i, count; |
| 3663 |
|
| 3664 |
/* wait until all TX fastpath tasks have completed */ |
| 3665 |
for (i = 0; i < sc->num_queues; i++) { |
| 3666 |
fp = &sc->fp[i]; |
| 3667 |
|
| 3668 |
count = 1000; |
| 3669 |
|
| 3670 |
while (bxe_has_tx_work(fp)) { |
| 3671 |
|
| 3672 |
BXE_FP_TX_LOCK(fp); |
| 3673 |
bxe_txeof(sc, fp); |
| 3674 |
BXE_FP_TX_UNLOCK(fp); |
| 3675 |
|
| 3676 |
if (count == 0) { |
| 3677 |
BLOGE(sc, "Timeout waiting for fp[%d] " |
| 3678 |
"transmits to complete!\n", i); |
| 3679 |
bxe_panic(sc, ("tx drain failure\n")); |
| 3680 |
return; |
| 3681 |
} |
| 3682 |
|
| 3683 |
count--; |
| 3684 |
DELAY(1000); |
| 3685 |
rmb(); |
| 3686 |
} |
| 3687 |
} |
| 3688 |
|
| 3689 |
return; |
| 3690 |
} |
| 3691 |
|
| 3692 |
static int |
| 3693 |
bxe_del_all_macs(struct bxe_softc *sc, |
| 3694 |
struct ecore_vlan_mac_obj *mac_obj, |
| 3695 |
int mac_type, |
| 3696 |
uint8_t wait_for_comp) |
| 3697 |
{ |
| 3698 |
unsigned long ramrod_flags = 0, vlan_mac_flags = 0; |
| 3699 |
int rc; |
| 3700 |
|
| 3701 |
/* wait for completion of requested */ |
| 3702 |
if (wait_for_comp) { |
| 3703 |
bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
| 3704 |
} |
| 3705 |
|
| 3706 |
/* Set the mac type of addresses we want to clear */ |
| 3707 |
bxe_set_bit(mac_type, &vlan_mac_flags); |
| 3708 |
|
| 3709 |
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); |
| 3710 |
if (rc < 0) { |
| 3711 |
BLOGE(sc, "Failed to delete MACs (%d)\n", rc); |
| 3712 |
} |
| 3713 |
|
| 3714 |
return (rc); |
| 3715 |
} |
| 3716 |
|
| 3717 |
static int |
| 3718 |
bxe_fill_accept_flags(struct bxe_softc *sc, |
| 3719 |
uint32_t rx_mode, |
| 3720 |
unsigned long *rx_accept_flags, |
| 3721 |
unsigned long *tx_accept_flags) |
| 3722 |
{ |
| 3723 |
/* Clear the flags first */ |
| 3724 |
*rx_accept_flags = 0; |
| 3725 |
*tx_accept_flags = 0; |
| 3726 |
|
| 3727 |
switch (rx_mode) { |
| 3728 |
case BXE_RX_MODE_NONE: |
| 3729 |
/* |
| 3730 |
* 'drop all' supersedes any accept flags that may have been |
| 3731 |
* passed to the function. |
| 3732 |
*/ |
| 3733 |
break; |
| 3734 |
|
| 3735 |
case BXE_RX_MODE_NORMAL: |
| 3736 |
bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); |
| 3737 |
bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); |
| 3738 |
bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); |
| 3739 |
|
| 3740 |
/* internal switching mode */ |
| 3741 |
bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); |
| 3742 |
bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); |
| 3743 |
bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); |
| 3744 |
|
| 3745 |
break; |
| 3746 |
|
| 3747 |
case BXE_RX_MODE_ALLMULTI: |
| 3748 |
bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); |
| 3749 |
bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); |
| 3750 |
bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); |
| 3751 |
|
| 3752 |
/* internal switching mode */ |
| 3753 |
bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); |
| 3754 |
bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); |
| 3755 |
bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); |
| 3756 |
|
| 3757 |
break; |
| 3758 |
|
| 3759 |
case BXE_RX_MODE_PROMISC: |
| 3760 |
/* |
| 3761 |
* According to deffinition of SI mode, iface in promisc mode |
| 3762 |
* should receive matched and unmatched (in resolution of port) |
| 3763 |
* unicast packets. |
| 3764 |
*/ |
| 3765 |
bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); |
| 3766 |
bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); |
| 3767 |
bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); |
| 3768 |
bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); |
| 3769 |
|
| 3770 |
/* internal switching mode */ |
| 3771 |
bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); |
| 3772 |
bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); |
| 3773 |
|
| 3774 |
if (IS_MF_SI(sc)) { |
| 3775 |
bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); |
| 3776 |
} else { |
| 3777 |
bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); |
| 3778 |
} |
| 3779 |
|
| 3780 |
break; |
| 3781 |
|
| 3782 |
default: |
| 3783 |
BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); |
| 3784 |
return (-1); |
| 3785 |
} |
| 3786 |
|
| 3787 |
/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ |
| 3788 |
if (rx_mode != BXE_RX_MODE_NONE) { |
| 3789 |
bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); |
| 3790 |
bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); |
| 3791 |
} |
| 3792 |
|
| 3793 |
return (0); |
| 3794 |
} |
| 3795 |
|
| 3796 |
static int |
| 3797 |
bxe_set_q_rx_mode(struct bxe_softc *sc, |
| 3798 |
uint8_t cl_id, |
| 3799 |
unsigned long rx_mode_flags, |
| 3800 |
unsigned long rx_accept_flags, |
| 3801 |
unsigned long tx_accept_flags, |
| 3802 |
unsigned long ramrod_flags) |
| 3803 |
{ |
| 3804 |
struct ecore_rx_mode_ramrod_params ramrod_param; |
| 3805 |
int rc; |
| 3806 |
|
| 3807 |
memset(&ramrod_param, 0, sizeof(ramrod_param)); |
| 3808 |
|
| 3809 |
/* Prepare ramrod parameters */ |
| 3810 |
ramrod_param.cid = 0; |
| 3811 |
ramrod_param.cl_id = cl_id; |
| 3812 |
ramrod_param.rx_mode_obj = &sc->rx_mode_obj; |
| 3813 |
ramrod_param.func_id = SC_FUNC(sc); |
| 3814 |
|
| 3815 |
ramrod_param.pstate = &sc->sp_state; |
| 3816 |
ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; |
| 3817 |
|
| 3818 |
ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); |
| 3819 |
ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); |
| 3820 |
|
| 3821 |
bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); |
| 3822 |
|
| 3823 |
ramrod_param.ramrod_flags = ramrod_flags; |
| 3824 |
ramrod_param.rx_mode_flags = rx_mode_flags; |
| 3825 |
|
| 3826 |
ramrod_param.rx_accept_flags = rx_accept_flags; |
| 3827 |
ramrod_param.tx_accept_flags = tx_accept_flags; |
| 3828 |
|
| 3829 |
rc = ecore_config_rx_mode(sc, &ramrod_param); |
| 3830 |
if (rc < 0) { |
| 3831 |
BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); |
| 3832 |
return (rc); |
| 3833 |
} |
| 3834 |
|
| 3835 |
return (0); |
| 3836 |
} |
| 3837 |
|
| 3838 |
static int |
| 3839 |
bxe_set_storm_rx_mode(struct bxe_softc *sc) |
| 3840 |
{ |
| 3841 |
unsigned long rx_mode_flags = 0, ramrod_flags = 0; |
| 3842 |
unsigned long rx_accept_flags = 0, tx_accept_flags = 0; |
| 3843 |
int rc; |
| 3844 |
|
| 3845 |
rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, |
| 3846 |
&tx_accept_flags); |
| 3847 |
if (rc) { |
| 3848 |
return (rc); |
| 3849 |
} |
| 3850 |
|
| 3851 |
bxe_set_bit(RAMROD_RX, &ramrod_flags); |
| 3852 |
bxe_set_bit(RAMROD_TX, &ramrod_flags); |
| 3853 |
|
| 3854 |
/* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ |
| 3855 |
return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, |
| 3856 |
rx_accept_flags, tx_accept_flags, |
| 3857 |
ramrod_flags)); |
| 3858 |
} |
| 3859 |
|
| 3860 |
/* returns the "mcp load_code" according to global load_count array */ |
| 3861 |
static int |
| 3862 |
bxe_nic_load_no_mcp(struct bxe_softc *sc) |
| 3863 |
{ |
| 3864 |
int path = SC_PATH(sc); |
| 3865 |
int port = SC_PORT(sc); |
| 3866 |
|
| 3867 |
BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", |
| 3868 |
path, load_count[path][0], load_count[path][1], |
| 3869 |
load_count[path][2]); |
| 3870 |
load_count[path][0]++; |
| 3871 |
load_count[path][1 + port]++; |
| 3872 |
BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", |
| 3873 |
path, load_count[path][0], load_count[path][1], |
| 3874 |
load_count[path][2]); |
| 3875 |
if (load_count[path][0] == 1) { |
| 3876 |
return (FW_MSG_CODE_DRV_LOAD_COMMON); |
| 3877 |
} else if (load_count[path][1 + port] == 1) { |
| 3878 |
return (FW_MSG_CODE_DRV_LOAD_PORT); |
| 3879 |
} else { |
| 3880 |
return (FW_MSG_CODE_DRV_LOAD_FUNCTION); |
| 3881 |
} |
| 3882 |
} |
| 3883 |
|
| 3884 |
/* returns the "mcp load_code" according to global load_count array */ |
| 3885 |
static int |
| 3886 |
bxe_nic_unload_no_mcp(struct bxe_softc *sc) |
| 3887 |
{ |
| 3888 |
int port = SC_PORT(sc); |
| 3889 |
int path = SC_PATH(sc); |
| 3890 |
|
| 3891 |
BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", |
| 3892 |
path, load_count[path][0], load_count[path][1], |
| 3893 |
load_count[path][2]); |
| 3894 |
load_count[path][0]--; |
| 3895 |
load_count[path][1 + port]--; |
| 3896 |
BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", |
| 3897 |
path, load_count[path][0], load_count[path][1], |
| 3898 |
load_count[path][2]); |
| 3899 |
if (load_count[path][0] == 0) { |
| 3900 |
return (FW_MSG_CODE_DRV_UNLOAD_COMMON); |
| 3901 |
} else if (load_count[path][1 + port] == 0) { |
| 3902 |
return (FW_MSG_CODE_DRV_UNLOAD_PORT); |
| 3903 |
} else { |
| 3904 |
return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); |
| 3905 |
} |
| 3906 |
} |
| 3907 |
|
| 3908 |
/* request unload mode from the MCP: COMMON, PORT or FUNCTION */ |
| 3909 |
static uint32_t |
| 3910 |
bxe_send_unload_req(struct bxe_softc *sc, |
| 3911 |
int unload_mode) |
| 3912 |
{ |
| 3913 |
uint32_t reset_code = 0; |
| 3914 |
#if 0 |
| 3915 |
int port = SC_PORT(sc); |
| 3916 |
int path = SC_PATH(sc); |
| 3917 |
#endif |
| 3918 |
|
| 3919 |
/* Select the UNLOAD request mode */ |
| 3920 |
if (unload_mode == UNLOAD_NORMAL) { |
| 3921 |
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
| 3922 |
} |
| 3923 |
#if 0 |
| 3924 |
else if (sc->flags & BXE_NO_WOL_FLAG) { |
| 3925 |
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; |
| 3926 |
} else if (sc->wol) { |
| 3927 |
uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
| 3928 |
uint8_t *mac_addr = sc->dev->dev_addr; |
| 3929 |
uint32_t val; |
| 3930 |
uint16_t pmc; |
| 3931 |
|
| 3932 |
/* |
| 3933 |
* The mac address is written to entries 1-4 to |
| 3934 |
* preserve entry 0 which is used by the PMF |
| 3935 |
*/ |
| 3936 |
uint8_t entry = (SC_VN(sc) + 1)*8; |
| 3937 |
|
| 3938 |
val = (mac_addr[0] << 8) | mac_addr[1]; |
| 3939 |
EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); |
| 3940 |
|
| 3941 |
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | |
| 3942 |
(mac_addr[4] << 8) | mac_addr[5]; |
| 3943 |
EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
| 3944 |
|
| 3945 |
/* Enable the PME and clear the status */ |
| 3946 |
pmc = pci_read_config(sc->dev, |
| 3947 |
(sc->devinfo.pcie_pm_cap_reg + |
| 3948 |
PCIR_POWER_STATUS), |
| 3949 |
2); |
| 3950 |
pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; |
| 3951 |
pci_write_config(sc->dev, |
| 3952 |
(sc->devinfo.pcie_pm_cap_reg + |
| 3953 |
PCIR_POWER_STATUS), |
| 3954 |
pmc, 4); |
| 3955 |
|
| 3956 |
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
| 3957 |
} |
| 3958 |
#endif |
| 3959 |
else { |
| 3960 |
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
| 3961 |
} |
| 3962 |
|
| 3963 |
/* Send the request to the MCP */ |
| 3964 |
if (!BXE_NOMCP(sc)) { |
| 3965 |
reset_code = bxe_fw_command(sc, reset_code, 0); |
| 3966 |
} else { |
| 3967 |
reset_code = bxe_nic_unload_no_mcp(sc); |
| 3968 |
} |
| 3969 |
|
| 3970 |
return (reset_code); |
| 3971 |
} |
| 3972 |
|
| 3973 |
/* send UNLOAD_DONE command to the MCP */ |
| 3974 |
static void |
| 3975 |
bxe_send_unload_done(struct bxe_softc *sc, |
| 3976 |
uint8_t keep_link) |
| 3977 |
{ |
| 3978 |
uint32_t reset_param = |
| 3979 |
keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; |
| 3980 |
|
| 3981 |
/* Report UNLOAD_DONE to MCP */ |
| 3982 |
if (!BXE_NOMCP(sc)) { |
| 3983 |
bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); |
| 3984 |
} |
| 3985 |
} |
| 3986 |
|
| 3987 |
/*
 * On the PMF, wait (up to ~1 second, 50 x 20ms polls) for the function
 * state machine to return to STARTED, i.e. for any in-flight TX
 * disable/enable transaction to finish. If it never does, force a
 * driver-only STARTED-->TX_STOPPED-->STARTED round trip to resynchronize.
 *
 * Returns 0 on success (or on non-PMF functions, where there is nothing
 * to wait for); otherwise the status of the forced TX_START transition.
 */
static int
bxe_func_wait_started(struct bxe_softc *sc)
{
    int tout = 50;

    /* only the PMF drives these transactions */
    if (!sc->port.pmf) {
        return (0);
    }

    /*
     * (assumption: No Attention from MCP at this stage)
     * PMF probably in the middle of TX disable/enable transaction
     * 1. Sync IRS for default SB
     * 2. Sync SP queue - this guarantees us that attention handling started
     * 3. Wait, that TX disable/enable transaction completes
     *
     * 1+2 guarantee that if DCBX attention was scheduled it already changed
     * pending bit of transaction from STARTED-->TX_STOPPED, if we already
     * received completion for the transaction the state is TX_STOPPED.
     * State will return to STARTED after completion of TX_STOPPED-->STARTED
     * transaction.
     */

    /* XXX make sure default SB ISR is done */
    /* need a way to synchronize an irq (intr_mtx?) */

    /* XXX flush any work queues */

    /* poll for the state machine to come back to STARTED */
    while (ecore_func_get_state(sc, &sc->func_obj) !=
           ECORE_F_STATE_STARTED && tout--) {
        DELAY(20000);
    }

    if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
        /*
         * Failed to complete the transaction in a "good way"
         * Force both transactions with CLR bit.
         */
        struct ecore_func_state_params func_params = { NULL };

        BLOGE(sc, "Unexpected function state! "
                  "Forcing STARTED-->TX_STOPPED-->STARTED\n");

        func_params.f_obj = &sc->func_obj;
        /* driver-only transition: do not send ramrods to the chip */
        bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);

        /* STARTED-->TX_STOPPED */
        func_params.cmd = ECORE_F_CMD_TX_STOP;
        ecore_func_state_change(sc, &func_params);

        /* TX_STOPPED-->STARTED */
        func_params.cmd = ECORE_F_CMD_TX_START;
        return (ecore_func_state_change(sc, &func_params));
    }

    return (0);
}
| 4044 |
|
| 4045 |
static int |
| 4046 |
bxe_stop_queue(struct bxe_softc *sc, |
| 4047 |
int index) |
| 4048 |
{ |
| 4049 |
struct bxe_fastpath *fp = &sc->fp[index]; |
| 4050 |
struct ecore_queue_state_params q_params = { NULL }; |
| 4051 |
int rc; |
| 4052 |
|
| 4053 |
BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); |
| 4054 |
|
| 4055 |
q_params.q_obj = &sc->sp_objs[fp->index].q_obj; |
| 4056 |
/* We want to wait for completion in this context */ |
| 4057 |
bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); |
| 4058 |
|
| 4059 |
/* Stop the primary connection: */ |
| 4060 |
|
| 4061 |
/* ...halt the connection */ |
| 4062 |
q_params.cmd = ECORE_Q_CMD_HALT; |
| 4063 |
rc = ecore_queue_state_change(sc, &q_params); |
| 4064 |
if (rc) { |
| 4065 |
return (rc); |
| 4066 |
} |
| 4067 |
|
| 4068 |
/* ...terminate the connection */ |
| 4069 |
q_params.cmd = ECORE_Q_CMD_TERMINATE; |
| 4070 |
memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); |
| 4071 |
q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; |
| 4072 |
rc = ecore_queue_state_change(sc, &q_params); |
| 4073 |
if (rc) { |
| 4074 |
return (rc); |
| 4075 |
} |
| 4076 |
|
| 4077 |
/* ...delete cfc entry */ |
| 4078 |
q_params.cmd = ECORE_Q_CMD_CFC_DEL; |
| 4079 |
memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); |
| 4080 |
q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; |
| 4081 |
return (ecore_queue_state_change(sc, &q_params)); |
| 4082 |
} |
| 4083 |
|
| 4084 |
/* wait for the outstanding SP commands */ |
| 4085 |
static inline uint8_t |
| 4086 |
bxe_wait_sp_comp(struct bxe_softc *sc, |
| 4087 |
unsigned long mask) |
| 4088 |
{ |
| 4089 |
unsigned long tmp; |
| 4090 |
int tout = 5000; /* wait for 5 secs tops */ |
| 4091 |
|
| 4092 |
while (tout--) { |
| 4093 |
mb(); |
| 4094 |
if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { |
| 4095 |
return (TRUE); |
| 4096 |
} |
| 4097 |
|
| 4098 |
DELAY(1000); |
| 4099 |
} |
| 4100 |
|
| 4101 |
mb(); |
| 4102 |
|
| 4103 |
tmp = atomic_load_acq_long(&sc->sp_state); |
| 4104 |
if (tmp & mask) { |
| 4105 |
BLOGE(sc, "Filtering completion timed out: " |
| 4106 |
"sp_state 0x%lx, mask 0x%lx\n", |
| 4107 |
tmp, mask); |
| 4108 |
return (FALSE); |
| 4109 |
} |
| 4110 |
|
| 4111 |
return (FALSE); |
| 4112 |
} |
| 4113 |
|
| 4114 |
static int |
| 4115 |
bxe_func_stop(struct bxe_softc *sc) |
| 4116 |
{ |
| 4117 |
struct ecore_func_state_params func_params = { NULL }; |
| 4118 |
int rc; |
| 4119 |
|
| 4120 |
/* prepare parameters for function state transitions */ |
| 4121 |
bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
| 4122 |
func_params.f_obj = &sc->func_obj; |
| 4123 |
func_params.cmd = ECORE_F_CMD_STOP; |
| 4124 |
|
| 4125 |
/* |
| 4126 |
* Try to stop the function the 'good way'. If it fails (in case |
| 4127 |
* of a parity error during bxe_chip_cleanup()) and we are |
| 4128 |
* not in a debug mode, perform a state transaction in order to |
| 4129 |
* enable further HW_RESET transaction. |
| 4130 |
*/ |
| 4131 |
rc = ecore_func_state_change(sc, &func_params); |
| 4132 |
if (rc) { |
| 4133 |
BLOGE(sc, "FUNC_STOP ramrod failed. " |
| 4134 |
"Running a dry transaction\n"); |
| 4135 |
bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); |
| 4136 |
return (ecore_func_state_change(sc, &func_params)); |
| 4137 |
} |
| 4138 |
|
| 4139 |
return (0); |
| 4140 |
} |
| 4141 |
|
| 4142 |
static int |
| 4143 |
bxe_reset_hw(struct bxe_softc *sc, |
| 4144 |
uint32_t load_code) |
| 4145 |
{ |
| 4146 |
struct ecore_func_state_params func_params = { NULL }; |
| 4147 |
|
| 4148 |
/* Prepare parameters for function state transitions */ |
| 4149 |
bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); |
| 4150 |
|
| 4151 |
func_params.f_obj = &sc->func_obj; |
| 4152 |
func_params.cmd = ECORE_F_CMD_HW_RESET; |
| 4153 |
|
| 4154 |
func_params.params.hw_init.load_phase = load_code; |
| 4155 |
|
| 4156 |
return (ecore_func_state_change(sc, &func_params)); |
| 4157 |
} |
| 4158 |
|
| 4159 |
/*
 * Disable interrupt delivery and (ideally) synchronize with any
 * in-flight handlers. Only the HW-side disable is implemented; the
 * ISR/taskqueue synchronization remains a known gap (see XXX notes).
 */
static void
bxe_int_disable_sync(struct bxe_softc *sc,
                     int disable_hw)
{
    /* optionally prevent the HW from raising further interrupts */
    if (disable_hw) {
        bxe_int_disable(sc);
    }

    /* XXX need a way to synchronize ALL irqs (intr_mtx?) */
    /* make sure all ISRs are done */

    /* XXX make sure sp_task is not running */
    /* cancel and flush work queues */
}
| 4174 |
|
| 4175 |
/*
 * Orderly chip shutdown: drain TX, remove MAC/multicast filtering, stop
 * every queue and the function itself, then reset the hardware at the
 * scope (FUNCTION/PORT/COMMON) dictated by the MCP's response to the
 * unload request. The steps below are order-sensitive.
 */
static void
bxe_chip_cleanup(struct bxe_softc *sc,
                 uint32_t unload_mode,
                 uint8_t keep_link)
{
    int port = SC_PORT(sc);
    struct ecore_mcast_ramrod_params rparam = { NULL };
    uint32_t reset_code;
    int i, rc = 0;

    bxe_drain_tx_queues(sc);

    /* give HW time to discard old tx messages */
    DELAY(1000);

    /* Clean all ETH MACs */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc);
    }

    /* Clean up UC list */
    rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
    if (rc < 0) {
        BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc);
    }

    /* Disable LLH */
    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0);
    }

    /* Set "drop all" to stop Rx */

    /*
     * We need to take the BXE_MCAST_LOCK() here in order to prevent
     * a race between the completion code and this code.
     */
    BXE_MCAST_LOCK(sc);

    /* if an rx-mode change is already in flight, just schedule ours */
    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
    } else {
        bxe_set_storm_rx_mode(sc);
    }

    /* Clean up multicast configuration */
    rparam.mcast_obj = &sc->mcast_obj;
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    BXE_MCAST_UNLOCK(sc);

    // XXX bxe_iov_chip_cleanup(sc);

    /*
     * Send the UNLOAD_REQUEST to the MCP. This will return if
     * this function should perform FUNCTION, PORT, or COMMON HW
     * reset.
     */
    reset_code = bxe_send_unload_req(sc, unload_mode);

    /*
     * (assumption: No Attention from MCP at this stage)
     * PMF probably in the middle of TX disable/enable transaction
     */
    rc = bxe_func_wait_started(sc);
    if (rc) {
        BLOGE(sc, "bxe_func_wait_started failed\n");
    }

    /*
     * Close multi and leading connections
     * Completions for ramrods are collected in a synchronous way
     */
    for (i = 0; i < sc->num_queues; i++) {
        if (bxe_stop_queue(sc, i)) {
            goto unload_error;
        }
    }

    /*
     * If SP settings didn't get completed so far - something
     * very wrong has happen.
     */
    if (!bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Common slow path ramrods got stuck!\n");
    }

unload_error:

    /* even after a queue-stop failure, continue the teardown */
    rc = bxe_func_stop(sc);
    if (rc) {
        BLOGE(sc, "Function stop failed!\n");
    }

    /* disable HW interrupts */
    bxe_int_disable_sync(sc, TRUE);

    /* detach interrupts */
    bxe_interrupt_detach(sc);

    /* Reset the chip */
    rc = bxe_reset_hw(sc, reset_code);
    if (rc) {
        BLOGE(sc, "Hardware reset failed\n");
    }

    /* Report UNLOAD_DONE to MCP */
    bxe_send_unload_done(sc, keep_link);
}
| 4288 |
|
| 4289 |
static void |
| 4290 |
bxe_disable_close_the_gate(struct bxe_softc *sc) |
| 4291 |
{ |
| 4292 |
uint32_t val; |
| 4293 |
int port = SC_PORT(sc); |
| 4294 |
|
| 4295 |
BLOGD(sc, DBG_LOAD, |
| 4296 |
"Disabling 'close the gates'\n"); |
| 4297 |
|
| 4298 |
if (CHIP_IS_E1(sc)) { |
| 4299 |
uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
| 4300 |
MISC_REG_AEU_MASK_ATTN_FUNC_0; |
| 4301 |
val = REG_RD(sc, addr); |
| 4302 |
val &= ~(0x300); |
| 4303 |
REG_WR(sc, addr, val); |
| 4304 |
} else { |
| 4305 |
val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); |
| 4306 |
val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | |
| 4307 |
MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); |
| 4308 |
REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); |
| 4309 |
} |
| 4310 |
} |
| 4311 |
|
| 4312 |
/*
 * Cleans the objects that have internal lists without sending
 * ramrods (driver-only / RAMROD_DRV_CLR_ONLY transactions). Should be
 * run when interrupts are disabled, after the normal teardown may have
 * failed to clean them.
 */
static void
bxe_squeeze_objects(struct bxe_softc *sc)
{
    unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
    struct ecore_mcast_ramrod_params rparam = { NULL };
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    int rc;

    /* Cleanup MACs' object first... */

    /* Wait for completion of requested */
    bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    /* Perform a dry cleanup */
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

    /* Clean ETH primary MAC */
    bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc);
    }

    /* Cleanup UC list */
    vlan_mac_flags = 0;
    bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
    rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
                             &ramrod_flags);
    if (rc != 0) {
        BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc);
    }

    /* Now clean mcast object... */

    rparam.mcast_obj = &sc->mcast_obj;
    bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

    /* Add a DEL command... */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
    if (rc < 0) {
        BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc);
    }

    /* now wait until all pending commands are cleared */

    /* CMD_CONT returns nonzero while commands remain, negative on error */
    rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    while (rc != 0) {
        if (rc < 0) {
            BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc);
            return;
        }

        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
    }
}
| 4371 |
|
| 4372 |
/*
 * Stop the controller. Called with the core lock held.
 *
 * Walks the full unload sequence: mark unloaded in shmem2, stop TX,
 * drain the queues, clean up the chip (or just report unload in
 * recovery mode), squeeze any leftover queueable objects, free fastpath
 * and firmware-stats memory, and finally record any pending parity
 * attention for the recovery flow. Returns 0 on success, -1 when called
 * in a closed/error state during recovery.
 */
static __noinline int
bxe_nic_unload(struct bxe_softc *sc,
               uint32_t unload_mode,
               uint8_t keep_link)
{
    uint8_t global = FALSE;
    uint32_t val;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n");

    /* mark driver as unloaded in shmem2 */
    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
    }

    if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
        (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
        /*
         * We can get here if the driver has been unloaded
         * during parity error recovery and is either waiting for a
         * leader to complete or for other functions to unload and
         * then ifconfig down has been issued. In this case we want to
         * unload and let other functions to complete a recovery
         * process.
         */
        sc->recovery_state = BXE_RECOVERY_DONE;
        sc->is_leader = 0;
        bxe_release_leader_lock(sc);
        mb();

        BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n");
        BLOGE(sc, "Can't unload in closed or error state\n");
        return (-1);
    }

    /*
     * Nothing to do during unload if previous bxe_nic_load()
     * did not complete successfully - all resources are released.
     */
    if ((sc->state == BXE_STATE_CLOSED) ||
        (sc->state == BXE_STATE_ERROR)) {
        return (0);
    }

    sc->state = BXE_STATE_CLOSING_WAITING_HALT;
    mb();

    /* stop tx */
    bxe_tx_disable(sc);

    sc->rx_mode = BXE_RX_MODE_NONE;
    /* XXX set rx mode ??? */

    if (IS_PF(sc)) {
        /* set ALWAYS_ALIVE bit in shmem */
        sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

        bxe_drv_pulse(sc);

        /* snapshot statistics before they are torn down */
        bxe_stats_handle(sc, STATS_EVENT_STOP);
        bxe_save_statistics(sc);
    }

    /* wait till consumers catch up with producers in all queues */
    bxe_drain_tx_queues(sc);

    /* if VF indicate to PF this function is going down (PF will delete sp
     * elements and clear initializations
     */
    if (IS_VF(sc)) {
        ; /* bxe_vfpf_close_vf(sc); */
    } else if (unload_mode != UNLOAD_RECOVERY) {
        /* if this is a normal/close unload need to clean up chip */
        bxe_chip_cleanup(sc, unload_mode, keep_link);
    } else {
        /* Send the UNLOAD_REQUEST to the MCP */
        bxe_send_unload_req(sc, unload_mode);

        /*
         * Prevent transactions to host from the functions on the
         * engine that doesn't reset global blocks in case of global
         * attention once global blocks are reset and gates are opened
         * (the engine which leader will perform the recovery
         * last).
         */
        if (!CHIP_IS_E1x(sc)) {
            bxe_pf_disable(sc);
        }

        /* disable HW interrupts */
        bxe_int_disable_sync(sc, TRUE);

        /* detach interrupts */
        bxe_interrupt_detach(sc);

        /* Report UNLOAD_DONE to MCP */
        bxe_send_unload_done(sc, FALSE);
    }

    /*
     * At this stage no more interrupts will arrive so we may safely clean
     * the queue'able objects here in case they failed to get cleaned so far.
     */
    if (IS_PF(sc)) {
        bxe_squeeze_objects(sc);
    }

    /* There should be no more pending SP commands at this stage */
    sc->sp_state = 0;

    sc->port.pmf = 0;

    bxe_free_fp_buffers(sc);

    if (IS_PF(sc)) {
        bxe_free_mem(sc);
    }

    bxe_free_fw_stats_mem(sc);

    sc->state = BXE_STATE_CLOSED;

    /*
     * Check if there are pending parity attentions. If there are - set
     * RECOVERY_IN_PROGRESS.
     */
    if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) {
        bxe_set_reset_in_progress(sc);

        /* Set RESET_IS_GLOBAL if needed */
        if (global) {
            bxe_set_reset_global(sc);
        }
    }

    /*
     * The last driver must disable a "close the gate" if there is no
     * parity attention or "process kill" pending.
     */
    if (IS_PF(sc) && !bxe_clear_pf_load(sc) &&
        bxe_reset_is_done(sc, SC_PATH(sc))) {
        bxe_disable_close_the_gate(sc);
    }

    BLOGD(sc, DBG_LOAD, "Ended NIC unload\n");

    return (0);
}
| 4525 |
|
| 4526 |
/* |
| 4527 |
* Called by the OS to set various media options (i.e. link, speed, etc.) when |
| 4528 |
* the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". |
| 4529 |
*/ |
| 4530 |
static int |
| 4531 |
bxe_ifmedia_update(struct ifnet *ifp) |
| 4532 |
{ |
| 4533 |
struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); |
| 4534 |
struct ifmedia *ifm; |
| 4535 |
|
| 4536 |
ifm = &sc->ifmedia; |
| 4537 |
|
| 4538 |
/* We only support Ethernet media type. */ |
| 4539 |
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { |
| 4540 |
return (EINVAL); |
| 4541 |
} |
| 4542 |
|
| 4543 |
switch (IFM_SUBTYPE(ifm->ifm_media)) { |
| 4544 |
case IFM_AUTO: |
| 4545 |
break; |
| 4546 |
case IFM_10G_CX4: |
| 4547 |
case IFM_10G_SR: |
| 4548 |
case IFM_10G_T: |
| 4549 |
case IFM_10G_TWINAX: |
| 4550 |
default: |
| 4551 |
/* We don't support changing the media type. */ |
| 4552 |
BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", |
| 4553 |
IFM_SUBTYPE(ifm->ifm_media)); |
| 4554 |
return (EINVAL); |
| 4555 |
} |
| 4556 |
|
| 4557 |
return (0); |
| 4558 |
} |
| 4559 |
|
| 4560 |
/* |
| 4561 |
* Called by the OS to get the current media status (i.e. link, speed, etc.). |
| 4562 |
*/ |
| 4563 |
static void |
| 4564 |
bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) |
| 4565 |
{ |
| 4566 |
struct bxe_softc *sc = if_getsoftc(ifp); |
| 4567 |
|
| 4568 |
/* Report link down if the driver isn't running. */ |
| 4569 |
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { |
| 4570 |
ifmr->ifm_active |= IFM_NONE; |
| 4571 |
return; |
| 4572 |
} |
| 4573 |
|
| 4574 |
/* Setup the default interface info. */ |
| 4575 |
ifmr->ifm_status = IFM_AVALID; |
| 4576 |
ifmr->ifm_active = IFM_ETHER; |
| 4577 |
|
| 4578 |
if (sc->link_vars.link_up) { |
| 4579 |
ifmr->ifm_status |= IFM_ACTIVE; |
| 4580 |
} else { |
| 4581 |
ifmr->ifm_active |= IFM_NONE; |
| 4582 |
return; |
| 4583 |
} |
| 4584 |
|
| 4585 |
ifmr->ifm_active |= sc->media; |
| 4586 |
|
| 4587 |
if (sc->link_vars.duplex == DUPLEX_FULL) { |
| 4588 |
ifmr->ifm_active |= IFM_FDX; |
| 4589 |
} else { |
| 4590 |
ifmr->ifm_active |= IFM_HDX; |
| 4591 |
} |
| 4592 |
} |
| 4593 |
|
| 4594 |
static int |
| 4595 |
bxe_ioctl_nvram(struct bxe_softc *sc, |
| 4596 |
uint32_t priv_op, |
| 4597 |
struct ifreq *ifr) |
| 4598 |
{ |
| 4599 |
struct bxe_nvram_data nvdata_base; |
| 4600 |
struct bxe_nvram_data *nvdata; |
| 4601 |
int len; |
| 4602 |
int error = 0; |
| 4603 |
|
| 4604 |
copyin(ifr->ifr_data, &nvdata_base, sizeof(nvdata_base)); |
| 4605 |
|
| 4606 |
len = (sizeof(struct bxe_nvram_data) + |
| 4607 |
nvdata_base.len - |
| 4608 |
sizeof(uint32_t)); |
| 4609 |
|
| 4610 |
if (len > sizeof(struct bxe_nvram_data)) { |
| 4611 |
if ((nvdata = (struct bxe_nvram_data *) |
| 4612 |
malloc(len, M_DEVBUF, |
| 4613 |
(M_NOWAIT | M_ZERO))) == NULL) { |
| 4614 |
BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); |
| 4615 |
return (1); |
| 4616 |
} |
| 4617 |
memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); |
| 4618 |
} else { |
| 4619 |
nvdata = &nvdata_base; |
| 4620 |
} |
| 4621 |
|
| 4622 |
if (priv_op == BXE_IOC_RD_NVRAM) { |
| 4623 |
BLOGD(sc, DBG_IOCTL, "IOC_RD_NVRAM 0x%x %d\n", |
| 4624 |
nvdata->offset, nvdata->len); |
| 4625 |
error = bxe_nvram_read(sc, |
| 4626 |
nvdata->offset, |
| 4627 |
(uint8_t *)nvdata->value, |
| 4628 |
nvdata->len); |
| 4629 |
copyout(nvdata, ifr->ifr_data, len); |
| 4630 |
} else { /* BXE_IOC_WR_NVRAM */ |
| 4631 |
BLOGD(sc, DBG_IOCTL, "IOC_WR_NVRAM 0x%x %d\n", |
| 4632 |
nvdata->offset, nvdata->len); |
| 4633 |
copyin(ifr->ifr_data, nvdata, len); |
| 4634 |
error = bxe_nvram_write(sc, |
| 4635 |
nvdata->offset, |
| 4636 |
(uint8_t *)nvdata->value, |
| 4637 |
nvdata->len); |
| 4638 |
} |
| 4639 |
|
| 4640 |
if (len > sizeof(struct bxe_nvram_data)) { |
| 4641 |
free(nvdata, M_DEVBUF); |
| 4642 |
} |
| 4643 |
|
| 4644 |
return (error); |
| 4645 |
} |
| 4646 |
|
| 4647 |
/*
 * Service the private statistics ioctls: report the number and width of
 * the ethernet statistics (SHOW_NUM), their names (SHOW_STR), or their
 * current 64-bit values (SHOW_CNT).
 *
 * NOTE(review): ifr->ifr_data is dereferenced/written directly here
 * rather than via copyout(); this assumes the caller has mapped it to a
 * kernel-accessible buffer — verify against the ioctl entry path.
 *
 * Returns: 0 on success, -1 for an unrecognized priv_op.
 */
static int
bxe_ioctl_stats_show(struct bxe_softc *sc,
                     uint32_t         priv_op,
                     struct ifreq     *ifr)
{
    const size_t str_size   = (BXE_NUM_ETH_STATS * STAT_NAME_LEN);
    const size_t stats_size = (BXE_NUM_ETH_STATS * sizeof(uint64_t));
    caddr_t p_tmp;
    uint32_t *offset;
    int i;

    switch (priv_op)
    {
    case BXE_IOC_STATS_SHOW_NUM:
        /* tell the caller how many stats exist and how long each name is */
        memset(ifr->ifr_data, 0, sizeof(union bxe_stats_show_data));
        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.num =
            BXE_NUM_ETH_STATS;
        ((union bxe_stats_show_data *)ifr->ifr_data)->desc.len =
            STAT_NAME_LEN;
        return (0);

    case BXE_IOC_STATS_SHOW_STR:
        /* emit the stat names as fixed-width (STAT_NAME_LEN) records */
        memset(ifr->ifr_data, 0, str_size);
        p_tmp = ifr->ifr_data;
        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
            strcpy(p_tmp, bxe_eth_stats_arr[i].string);
            p_tmp += STAT_NAME_LEN;
        }
        return (0);

    case BXE_IOC_STATS_SHOW_CNT:
        /* emit each stat widened to uint64_t, in table order */
        memset(ifr->ifr_data, 0, stats_size);
        p_tmp = ifr->ifr_data;
        for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
            /* locate the stat as a 32-bit word offset into eth_stats */
            offset = ((uint32_t *)&sc->eth_stats +
                      bxe_eth_stats_arr[i].offset);
            switch (bxe_eth_stats_arr[i].size) {
            case 4:
                /* 32-bit counter: zero-extend */
                *((uint64_t *)p_tmp) = (uint64_t)*offset;
                break;
            case 8:
                /* 64-bit counter stored as hi/lo 32-bit halves */
                *((uint64_t *)p_tmp) = HILO_U64(*offset, *(offset + 1));
                break;
            default:
                /* unexpected width: report zero rather than garbage */
                *((uint64_t *)p_tmp) = 0;
            }
            p_tmp += sizeof(uint64_t);
        }
        return (0);

    default:
        return (-1);
    }
}
| 4701 |
|
| 4702 |
static void |
| 4703 |
bxe_handle_chip_tq(void *context, |
| 4704 |
int pending) |
| 4705 |
{ |
| 4706 |
struct bxe_softc *sc = (struct bxe_softc *)context; |
| 4707 |
long work = atomic_load_acq_long(&sc->chip_tq_flags); |
| 4708 |
|
| 4709 |
switch (work) |
| 4710 |
{ |
| 4711 |
case CHIP_TQ_START: |
| 4712 |
if ((if_getflags(sc->ifp) & IFF_UP) && |
| 4713 |
!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { |
| 4714 |
/* start the interface */ |
| 4715 |
BLOGD(sc, DBG_LOAD, "Starting the interface...\n"); |
| 4716 |
BXE_CORE_LOCK(sc); |
| 4717 |
bxe_init_locked(sc); |
| 4718 |
BXE_CORE_UNLOCK(sc); |
| 4719 |
} |
| 4720 |
break; |
| 4721 |
|
| 4722 |
case CHIP_TQ_STOP: |
| 4723 |
if (!(if_getflags(sc->ifp) & IFF_UP) && |
| 4724 |
(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { |
| 4725 |
/* bring down the interface */ |
| 4726 |
BLOGD(sc, DBG_LOAD, "Stopping the interface...\n"); |
| 4727 |
bxe_periodic_stop(sc); |
| 4728 |
BXE_CORE_LOCK(sc); |
| 4729 |
bxe_stop_locked(sc); |
| 4730 |
BXE_CORE_UNLOCK(sc); |
| 4731 |
} |
| 4732 |
break; |
| 4733 |
|
| 4734 |
case CHIP_TQ_REINIT: |
| 4735 |
if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { |
| 4736 |
/* restart the interface */ |
| 4737 |
BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); |
| 4738 |
bxe_periodic_stop(sc); |
| 4739 |
BXE_CORE_LOCK(sc); |
| 4740 |
bxe_stop_locked(sc); |
| 4741 |
bxe_init_locked(sc); |
| 4742 |
BXE_CORE_UNLOCK(sc); |
| 4743 |
} |
| 4744 |
break; |
| 4745 |
|
| 4746 |
default: |
| 4747 |
break; |
| 4748 |
} |
| 4749 |
} |
| 4750 |
|
| 4751 |
/*
 * Handles any IOCTL calls from the operating system.
 *
 * Returns:
 *   0 = Success, >0 Failure
 */
static int
bxe_ioctl(if_t ifp,
          u_long command,
          caddr_t data)
{
    struct bxe_softc *sc = if_getsoftc(ifp);
    struct ifreq *ifr = (struct ifreq *)data;
    struct bxe_nvram_data *nvdata;
    uint32_t priv_op;
    int mask = 0;       /* capability bits that changed (SIOCSIFCAP) */
    int reinit = 0;     /* set when the change requires a chip re-init */
    int error = 0;

    /* valid MTU range for this hardware */
    int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
    int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);

    switch (command)
    {
    case SIOCSIFMTU:
        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n",
              ifr->ifr_mtu);

        if (sc->mtu == ifr->ifr_mtu) {
            /* nothing to change */
            break;
        }

        if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
            BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
                  ifr->ifr_mtu, mtu_min, mtu_max);
            error = EINVAL;
            break;
        }

        /* publish the new MTU to the softc */
        atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
                             (unsigned long)ifr->ifr_mtu);
        /*
        atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp),
                              (unsigned long)ifr->ifr_mtu);
        XXX - Not sure why it needs to be atomic
        */
        if_setmtu(ifp, ifr->ifr_mtu);
        /* an MTU change requires the chip to be re-initialized */
        reinit = 1;
        break;

    case SIOCSIFFLAGS:
        /* toggle the interface state up or down */
        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n");

        /* check if the interface is up */
        if (if_getflags(ifp) & IFF_UP) {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                /* set the receive mode flags */
                bxe_set_rx_mode(sc);
            } else {
                /* defer the actual start to the chip taskqueue */
                atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_START);
                taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
            }
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                /* defer the actual stop to the chip taskqueue */
                atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_STOP);
                taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
            }
        }

        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* add/delete multicast addresses */
        BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n");

        /* check if the interface is up */
        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
            /* set the receive mode flags */
            bxe_set_rx_mode(sc);
        }

        break;

    case SIOCSIFCAP:
        /* find out which capabilities have changed */
        mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));

        BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n",
              mask);

        /* toggle the LRO capabilities enable flag */
        if (mask & IFCAP_LRO) {
            if_togglecapenable(ifp, IFCAP_LRO);
            BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n",
                  (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF");
            /* LRO changes require a re-init to take effect */
            reinit = 1;
        }

        /* toggle the TXCSUM checksum capabilities enable flag */
        if (mask & IFCAP_TXCSUM) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n",
                  (if_getcapenable(ifp) & IFCAP_TXCSUM) ? "ON" : "OFF");
            if (if_getcapenable(ifp) & IFCAP_TXCSUM) {
                if_sethwassistbits(ifp, (CSUM_IP  |
                                         CSUM_TCP |
                                         CSUM_UDP |
                                         CSUM_TSO |
                                         CSUM_TCP_IPV6 |
                                         CSUM_UDP_IPV6), 0);
            } else {
                if_clearhwassist(ifp); /* XXX */
            }
        }

        /* toggle the RXCSUM checksum capabilities enable flag */
        if (mask & IFCAP_RXCSUM) {
            if_togglecapenable(ifp, IFCAP_RXCSUM);
            BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n",
                  (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF");
            /* NOTE(review): this sets TX hwassist bits on an RX toggle,
             * mirroring the TXCSUM branch above — confirm intentional */
            if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
                if_sethwassistbits(ifp, (CSUM_IP  |
                                         CSUM_TCP |
                                         CSUM_UDP |
                                         CSUM_TSO |
                                         CSUM_TCP_IPV6 |
                                         CSUM_UDP_IPV6), 0);
            } else {
                if_clearhwassist(ifp); /* XXX */
            }
        }

        /* toggle TSO4 capabilities enabled flag */
        if (mask & IFCAP_TSO4) {
            if_togglecapenable(ifp, IFCAP_TSO4);
            BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n",
                  (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF");
        }

        /* toggle TSO6 capabilities enabled flag */
        if (mask & IFCAP_TSO6) {
            if_togglecapenable(ifp, IFCAP_TSO6);
            BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n",
                  (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF");
        }

        /* toggle VLAN_HWTSO capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWTSO) {

            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
            BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n",
                  (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF");
        }

        /* toggle VLAN_HWCSUM capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWCSUM) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n");
            error = EINVAL;
        }

        /* toggle VLAN_MTU capabilities enable flag */
        if (mask & IFCAP_VLAN_MTU) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_MTU is not supported!\n");
            error = EINVAL;
        }

        /* toggle VLAN_HWTAGGING capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWTAGGING) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n");
            error = EINVAL;
        }

        /* toggle VLAN_HWFILTER capabilities enabled flag */
        if (mask & IFCAP_VLAN_HWFILTER) {
            /* XXX investigate this... */
            BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n");
            error = EINVAL;
        }

        /* XXX not yet...
         * IFCAP_WOL_MAGIC
         */

        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        /* set/get interface media */
        BLOGD(sc, DBG_IOCTL,
              "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n",
              (command & 0xff));
        error = ifmedia_ioctl_drv(ifp, ifr, &sc->ifmedia, command);
        break;

    case SIOCGPRIVATE_0:
        /* driver-private ioctls: NVRAM access and stats export */
        copyin(ifr->ifr_data, &priv_op, sizeof(priv_op));

        switch (priv_op)
        {
        case BXE_IOC_RD_NVRAM:
        case BXE_IOC_WR_NVRAM:
            nvdata = (struct bxe_nvram_data *)ifr->ifr_data;
            BLOGD(sc, DBG_IOCTL,
                  "Received Private NVRAM ioctl addr=0x%x size=%u\n",
                  nvdata->offset, nvdata->len);
            error = bxe_ioctl_nvram(sc, priv_op, ifr);
            break;

        case BXE_IOC_STATS_SHOW_NUM:
        case BXE_IOC_STATS_SHOW_STR:
        case BXE_IOC_STATS_SHOW_CNT:
            BLOGD(sc, DBG_IOCTL, "Received Private Stats ioctl (%d)\n",
                  priv_op);
            error = bxe_ioctl_stats_show(sc, priv_op, ifr);
            break;

        default:
            BLOGW(sc, "Received Private Unknown ioctl (%d)\n", priv_op);
            error = EINVAL;
            break;
        }

        break;

    default:
        /* everything else goes to the generic ethernet handler */
        BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n",
              (command & 0xff));
        error = ether_ioctl_drv(ifp, command, data);
        break;
    }

    /* a change above flagged a re-init; only bounce a running interface */
    if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
        BLOGD(sc, DBG_LOAD | DBG_IOCTL,
              "Re-initializing hardware from IOCTL change\n");
        atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_REINIT);
        taskqueue_enqueue(sc->chip_tq, &sc->chip_tq_task);
    }

    return (error);
}
| 4997 |
|
| 4998 |
/*
 * Debug helper: walk an mbuf chain and log each link's length, flags,
 * packet header, and external-storage details. Optionally dumps the
 * raw data of each mbuf as well. No-op unless DBG_MBUF logging is on.
 */
static __noinline void
bxe_dump_mbuf(struct bxe_softc *sc,
              struct mbuf      *m,
              uint8_t          contents)
{
    char * type;
    int i = 0;

    /* skip all work when mbuf debugging is disabled */
    if (!(sc->debug & DBG_MBUF)) {
        return;
    }

    if (m == NULL) {
        BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n");
        return;
    }

    /* walk the chain, logging one line (or more) per mbuf */
    while (m) {
        BLOGD(sc, DBG_MBUF,
              "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n",
              i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);

        /* only the first mbuf of a packet carries the packet header */
        if (m->m_flags & M_PKTHDR) {
             BLOGD(sc, DBG_MBUF,
                   "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
                   i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
                   (int)m->m_pkthdr.csum_flags, CSUM_BITS);
        }

        /* describe external storage (clusters, jumbo buffers, etc.) */
        if (m->m_flags & M_EXT) {
            switch (m->m_ext.ext_type) {
            case EXT_CLUSTER:    type = "EXT_CLUSTER";    break;
            case EXT_SFBUF:      type = "EXT_SFBUF";      break;
            case EXT_JUMBOP:     type = "EXT_JUMBOP";     break;
            case EXT_JUMBO9:     type = "EXT_JUMBO9";     break;
            case EXT_JUMBO16:    type = "EXT_JUMBO16";    break;
            case EXT_PACKET:     type = "EXT_PACKET";     break;
            case EXT_MBUF:       type = "EXT_MBUF";       break;
            case EXT_NET_DRV:    type = "EXT_NET_DRV";    break;
            case EXT_MOD_TYPE:   type = "EXT_MOD_TYPE";   break;
            case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break;
            case EXT_EXTREF:     type = "EXT_EXTREF";     break;
            default:             type = "UNKNOWN";        break;
            }

            BLOGD(sc, DBG_MBUF,
                  "%02d: - m_ext: %p ext_size=%d type=%s\n",
                  i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
        }

        /* optionally hex-dump the mbuf payload too */
        if (contents) {
            bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE);
        }

        m = m->m_next;
        i++;
    }
}
| 5056 |
|
| 5057 |
/*
 * Checks to ensure the 13 bd sliding window is >= MSS for TSO.
 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
 * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD
 * The headers come in a separate bd in FreeBSD so 13-3=10.
 * Returns: 0 if OK to send, 1 if packet needs further defragmentation
 */
static int
bxe_chktso_window(struct bxe_softc  *sc,
                  int               nsegs,
                  bus_dma_segment_t *segs,
                  struct mbuf       *m)
{
    uint32_t num_wnds, wnd_size, wnd_sum;
    int32_t frag_idx, wnd_idx;
    unsigned short lso_mss;
    int defrag;

    defrag = 0;
    wnd_sum = 0;
    wnd_size = 10;
    /* NOTE(review): assumes nsegs > wnd_size; a smaller nsegs would
     * wrap num_wnds (unsigned) — confirm callers guarantee this */
    num_wnds = nsegs - wnd_size;
    /* NOTE(review): htole16 on values used only for comparison — a
     * no-op on little-endian; verify intent for big-endian targets */
    lso_mss = htole16(m->m_pkthdr.tso_segsz);

    /*
     * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the
     * first window sum of data while skipping the first assuming it is the
     * header in FreeBSD.
     */
    for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) {
        wnd_sum += htole16(segs[frag_idx].ds_len);
    }

    /* check the first 10 bd window size */
    if (wnd_sum < lso_mss) {
        return (1);
    }

    /* run through the windows */
    for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) {
        /* subtract the first mbuf->m_len of the last wndw(-header) */
        wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
        /* add the next mbuf len to the len of our new window */
        wnd_sum += htole16(segs[frag_idx].ds_len);
        if (wnd_sum < lso_mss) {
            return (1);
        }
    }

    /* every 10-segment window can carry at least one MSS */
    return (0);
}
| 5108 |
|
| 5109 |
/*
 * Fill in the E2/E3 parsing-BD checksum-offload fields for an outgoing
 * frame: the L4 header start offset (in 16-bit words) and, for TCP, the
 * TCP header length (in 32-bit words).
 *
 * Returns the total header length in bytes (Eth + IP + L4), or 0 when
 * no L4 offload applies.
 */
static uint8_t
bxe_set_pbd_csum_e2(struct bxe_fastpath *fp,
                    struct mbuf         *m,
                    uint32_t            *parsing_data)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip4 = NULL;
    struct ip6_hdr *ip6 = NULL;
    caddr_t ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen, ip_hlen, l4_off;
    uint16_t proto;

    if (m->m_pkthdr.csum_flags == CSUM_IP) {
        /* no L4 checksum offload needed */
        return (0);
    }

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        proto = ntohs(eh->evl_proto);
    } else {
        e_hlen = ETHER_HDR_LEN;
        proto = ntohs(eh->evl_encap_proto);
    }

    switch (proto) {
    case ETHERTYPE_IP:
        /* get the IP header, if mbuf len < 20 then header in next mbuf */
        ip4 = (m->m_len < sizeof(struct ip)) ?
                  (struct ip *)m->m_next->m_data :
                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is number of 32-bit words */
        ip_hlen = (ip4->ip_hl << 2);
        ip = (caddr_t)ip4;
        break;
    case ETHERTYPE_IPV6:
        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
                  (struct ip6_hdr *)m->m_next->m_data :
                  (struct ip6_hdr *)(m->m_data + e_hlen);
        /* XXX cannot support offload with IPv6 extensions */
        ip_hlen = sizeof(struct ip6_hdr);
        ip = (caddr_t)ip6;
        break;
    default:
        /* We can't offload in this case... */
        /* XXX error stat ??? */
        return (0);
    }

    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */
    l4_off = (e_hlen + ip_hlen);

    /* L4 start offset is programmed in units of 16-bit words */
    *parsing_data |=
        (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
         ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W);

    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
                                  CSUM_TSO |
                                  CSUM_TCP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
        th = (struct tcphdr *)(ip + ip_hlen);
        /* th_off is number of 32-bit words */
        *parsing_data |= ((th->th_off <<
                           ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
                          ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW);
        return (l4_off + (th->th_off << 2)); /* entire header length */
    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
                                         CSUM_UDP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_udp++;
        return (l4_off + sizeof(struct udphdr)); /* entire header length */
    } else {
        /* XXX error stat ??? */
        return (0);
    }
}
| 5190 |
|
| 5191 |
/*
 * Fill in the E1x parsing-BD checksum-offload fields for an outgoing
 * frame. All header lengths in the parsing BD are expressed in 16-bit
 * words (hence the '>> 1' / '<< 1' arithmetic throughout).
 *
 * Returns the total header length (Eth + IP + L4) in bytes, or 0 when
 * no L4 offload applies.
 */
static uint8_t
bxe_set_pbd_csum(struct bxe_fastpath        *fp,
                 struct mbuf                *m,
                 struct eth_tx_parse_bd_e1x *pbd)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip4 = NULL;
    struct ip6_hdr *ip6 = NULL;
    caddr_t ip = NULL;
    struct tcphdr *th = NULL;
    struct udphdr *uh = NULL;
    int e_hlen, ip_hlen;
    uint16_t proto;
    uint8_t hlen;
    uint16_t tmp_csum;
    uint32_t *tmp_uh;

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        proto = ntohs(eh->evl_proto);
    } else {
        e_hlen = ETHER_HDR_LEN;
        proto = ntohs(eh->evl_encap_proto);
    }

    switch (proto) {
    case ETHERTYPE_IP:
        /* get the IP header, if mbuf len < 20 then header in next mbuf */
        ip4 = (m->m_len < sizeof(struct ip)) ?
                  (struct ip *)m->m_next->m_data :
                  (struct ip *)(m->m_data + e_hlen);
        /* ip_hl is number of 32-bit words; '<< 1' yields 16-bit words */
        ip_hlen = (ip4->ip_hl << 1);
        ip = (caddr_t)ip4;
        break;
    case ETHERTYPE_IPV6:
        /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */
        ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
                  (struct ip6_hdr *)m->m_next->m_data :
                  (struct ip6_hdr *)(m->m_data + e_hlen);
        /* XXX cannot support offload with IPv6 extensions */
        ip_hlen = (sizeof(struct ip6_hdr) >> 1);
        ip = (caddr_t)ip6;
        break;
    default:
        /* We can't offload in this case... */
        /* XXX error stat ??? */
        return (0);
    }

    /* Ethernet header length in 16-bit words */
    hlen = (e_hlen >> 1);

    /* note that rest of global_data is indirectly zeroed here */
    if (m->m_flags & M_VLANTAG) {
        pbd->global_data =
            htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
    } else {
        pbd->global_data = htole16(hlen);
    }

    pbd->ip_hlen_w = ip_hlen;

    hlen += pbd->ip_hlen_w;

    /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */

    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
                                  CSUM_TSO |
                                  CSUM_TCP_IPV6)) {
        /* ip_hlen is in 16-bit words, so '<< 1' converts back to bytes */
        th = (struct tcphdr *)(ip + (ip_hlen << 1));
        /* th_off is number of 32-bit words */
        hlen += (uint16_t)(th->th_off << 1);
    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
                                         CSUM_UDP_IPV6)) {
        uh = (struct udphdr *)(ip + (ip_hlen << 1));
        hlen += (sizeof(struct udphdr) / 2);
    } else {
        /* valid case as only CSUM_IP was set */
        return (0);
    }

    pbd->total_hlen_w = htole16(hlen);

    if (m->m_pkthdr.csum_flags & (CSUM_TCP |
                                  CSUM_TSO |
                                  CSUM_TCP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
        pbd->tcp_pseudo_csum = ntohs(th->th_sum);
    } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
                                         CSUM_UDP_IPV6)) {
        fp->eth_q_stats.tx_ofld_frames_csum_udp++;

        /*
         * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP
         * checksums and does not know anything about the UDP header and where
         * the checksum field is located. It only knows about TCP. Therefore
         * we "lie" to the hardware for outgoing UDP packets w/ checksum
         * offload. Since the checksum field offset for TCP is 16 bytes and
         * for UDP it is 6 bytes we pass a pointer to the hardware that is 10
         * bytes less than the start of the UDP header. This allows the
         * hardware to write the checksum in the correct spot. But the
         * hardware will compute a checksum which includes the last 10 bytes
         * of the IP header. To correct this we tweak the stack computed
         * pseudo checksum by folding in the calculation of the inverse
         * checksum for those final 10 bytes of the IP header. This allows
         * the correct checksum to be computed by the hardware.
         */

        /* set pointer 10 bytes before UDP header */
        tmp_uh = (uint32_t *)((uint8_t *)uh - 10);

        /* calculate a pseudo header checksum over the first 10 bytes */
        tmp_csum = in_pseudo(*tmp_uh,
                             *(tmp_uh + 1),
                             *(uint16_t *)(tmp_uh + 2));

        pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
    }

    return (hlen * 2); /* entire header length, number of bytes */
}
| 5316 |
|
| 5317 |
static void |
| 5318 |
bxe_set_pbd_lso_e2(struct mbuf *m, |
| 5319 |
uint32_t *parsing_data) |
| 5320 |
{ |
| 5321 |
*parsing_data |= ((m->m_pkthdr.tso_segsz << |
| 5322 |
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & |
| 5323 |
ETH_TX_PARSE_BD_E2_LSO_MSS); |
| 5324 |
|
| 5325 |
/* XXX test for IPv6 with extension header... */ |
| 5326 |
#if 0 |
| 5327 |
struct ip6_hdr *ip6; |
| 5328 |
if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') |
| 5329 |
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; |
| 5330 |
#endif |
| 5331 |
} |
| 5332 |
|
| 5333 |
/*
 * Program the E1x parsing-BD LSO (TSO) fields: MSS, TCP sequence
 * number, TCP flags, IP identification, and the pseudo-header
 * checksum the hardware completes per generated segment.
 *
 * NOTE(review): only the IPv4 path is active (#if 1); the IPv6
 * variant is stubbed out below. Assumes Eth+IP+TCP headers all sit
 * in the first mbuf — confirm callers guarantee this for TSO frames.
 */
static void
bxe_set_pbd_lso(struct mbuf                *m,
                struct eth_tx_parse_bd_e1x *pbd)
{
    struct ether_vlan_header *eh = NULL;
    struct ip *ip = NULL;
    struct tcphdr *th = NULL;
    int e_hlen;

    /* get the Ethernet header */
    eh = mtod(m, struct ether_vlan_header *);

    /* handle VLAN encapsulation if present */
    e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
                 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN;

    /* get the IP and TCP header, with LSO entire header in first mbuf */
    /* XXX assuming IPv4 */
    ip = (struct ip *)(m->m_data + e_hlen);
    th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

    pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
    pbd->tcp_send_seq = ntohl(th->th_seq);
    /* TCP flags live in the high byte of the low half of word 3 */
    pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);

#if 1
        /* XXX IPv4 */
        pbd->ip_id = ntohs(ip->ip_id);
        pbd->tcp_pseudo_csum =
            ntohs(in_pseudo(ip->ip_src.s_addr,
                            ip->ip_dst.s_addr,
                            htons(IPPROTO_TCP)));
#else
        /* XXX IPv6 */
        pbd->tcp_pseudo_csum =
            ntohs(in_pseudo(&ip6->ip6_src,
                            &ip6->ip6_dst,
                            htons(IPPROTO_TCP)));
#endif

    pbd->global_data |=
        htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
| 5376 |
|
| 5377 |
/* |
| 5378 |
 * Encapsulate an mbuf cluster into the tx bd chain and make the memory
| 5379 |
* visible to the controller. |
| 5380 |
* |
| 5381 |
* If an mbuf is submitted to this routine and cannot be given to the |
| 5382 |
* controller (e.g. it has too many fragments) then the function may free |
| 5383 |
* the mbuf and return to the caller. |
| 5384 |
* |
| 5385 |
* Returns: |
| 5386 |
* 0 = Success, !0 = Failure |
| 5387 |
* Note the side effect that an mbuf may be freed if it causes a problem. |
| 5388 |
*/ |
| 5389 |
static int
bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
{
    bus_dma_segment_t segs[32];
    struct mbuf *m0;
    struct bxe_sw_tx_bd *tx_buf;
    struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
    struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
    /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */
    struct eth_tx_bd *tx_data_bd;
    struct eth_tx_bd *tx_total_pkt_size_bd;
    struct eth_tx_start_bd *tx_start_bd;
    uint16_t bd_prod, pkt_prod, total_pkt_size;
    uint8_t mac_type;
    int defragged, error, nsegs, rc, nbds, vlan_off, ovlan;
    struct bxe_softc *sc;
    uint16_t tx_bd_avail;
    struct ether_vlan_header *eh;
    uint32_t pbd_e2_parsing_data = 0;
    uint8_t hlen = 0;      /* L2/L3/L4 header length from the csum helpers */
    int tmp_bd;
    int i;

    sc = fp->sc;

    M_ASSERTPKTHDR(*m_head);

    m0 = *m_head;
    rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0;
    tx_start_bd = NULL;
    tx_data_bd = NULL;
    tx_total_pkt_size_bd = NULL;

    /* get the H/W pointer for packets and BDs */
    pkt_prod = fp->tx_pkt_prod;
    bd_prod = fp->tx_bd_prod;

    mac_type = UNICAST_ADDRESS;

    /* map the mbuf into the next open DMAable memory */
    tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
    error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
                                    tx_buf->m_map, m0,
                                    segs, &nsegs, BUS_DMA_NOWAIT);

    /* mapping errors */
    if(__predict_false(error != 0)) {
        fp->eth_q_stats.tx_dma_mapping_failure++;
        if (error == ENOMEM) {
            /* resource issue, try again later */
            rc = ENOMEM;
        } else if (error == EFBIG) {
            /* possibly recoverable with defragmentation */
            fp->eth_q_stats.mbuf_defrag_attempts++;
            m0 = m_defrag(*m_head, M_NOWAIT);
            if (m0 == NULL) {
                fp->eth_q_stats.mbuf_defrag_failures++;
                rc = ENOBUFS;
            } else {
                /* defrag successful, try mapping again */
                *m_head = m0;
                error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
                                                tx_buf->m_map, m0,
                                                segs, &nsegs, BUS_DMA_NOWAIT);
                if (error) {
                    fp->eth_q_stats.tx_dma_mapping_failure++;
                    rc = error;
                }
            }
        } else {
            /* unknown, unrecoverable mapping error */
            BLOGE(sc, "Unknown TX mapping error rc=%d\n", error);
            bxe_dump_mbuf(sc, m0, FALSE);
            rc = error;
        }

        goto bxe_tx_encap_continue;
    }

    tx_bd_avail = bxe_tx_avail(sc, fp);

    /* make sure there is enough room in the send queue */
    if (__predict_false(tx_bd_avail < (nsegs + 2))) {
        /* Recoverable, try again later. */
        fp->eth_q_stats.tx_hw_queue_full++;
        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
        rc = ENOMEM;
        goto bxe_tx_encap_continue;
    }

    /* capture the current H/W TX chain high watermark */
    if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
                        (TX_BD_USABLE - tx_bd_avail))) {
        fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
    }

    /* make sure it fits in the packet window */
    if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
        /*
         * The mbuf may be too big for the controller to handle. If the
         * frame is a TSO frame we'll need to do an additional check.
         */
        if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
            if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) {
                goto bxe_tx_encap_continue; /* OK to send */
            } else {
                fp->eth_q_stats.tx_window_violation_tso++;
            }
        } else {
            fp->eth_q_stats.tx_window_violation_std++;
        }

        /* lets try to defragment this mbuf and remap it */
        fp->eth_q_stats.mbuf_defrag_attempts++;
        bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);

        m0 = m_defrag(*m_head, M_NOWAIT);
        if (m0 == NULL) {
            fp->eth_q_stats.mbuf_defrag_failures++;
            /* Ugh, just drop the frame... :( */
            rc = ENOBUFS;
        } else {
            /* defrag successful, try mapping again */
            *m_head = m0;
            error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
                                            tx_buf->m_map, m0,
                                            segs, &nsegs, BUS_DMA_NOWAIT);
            if (error) {
                fp->eth_q_stats.tx_dma_mapping_failure++;
                /* No sense in trying to defrag/copy chain, drop it. :( */
                rc = error;
            }
            else {
                /* if the chain is still too long then drop it */
                if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) {
                    bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
                    rc = ENODEV;
                }
            }
        }
    }

bxe_tx_encap_continue:

    /* Check for errors */
    if (rc) {
        if (rc == ENOMEM) {
            /* recoverable try again later */
        } else {
            fp->eth_q_stats.tx_soft_errors++;
            fp->eth_q_stats.mbuf_alloc_tx--;
            m_freem(*m_head);
            *m_head = NULL;
        }

        return (rc);
    }

    /* set flag according to packet type (UNICAST_ADDRESS is default) */
    if (m0->m_flags & M_BCAST) {
        mac_type = BROADCAST_ADDRESS;
    } else if (m0->m_flags & M_MCAST) {
        mac_type = MULTICAST_ADDRESS;
    }

    /* store the mbuf into the mbuf ring */
    tx_buf->m = m0;
    tx_buf->first_bd = fp->tx_bd_prod;
    tx_buf->flags = 0;

    /* prepare the first transmit (start) BD for the mbuf */
    tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;

    BLOGD(sc, DBG_TX,
          "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n",
          pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

    tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
    tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
    tx_start_bd->nbytes = htole16(segs[0].ds_len);
    /*
     * NOTE(review): nbytes is little-endian at this point; adding it raw to
     * total_pkt_size (and the nbytes > hlen compare in the TSO path below)
     * is only correct on a little-endian host — confirm if BE support
     * matters.
     */
    total_pkt_size += tx_start_bd->nbytes;
    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;

    tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

    /* all frames have at least Start BD + Parsing BD */
    nbds = nsegs + 1;
    tx_start_bd->nbd = htole16(nbds);

    if (m0->m_flags & M_VLANTAG) {
        tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
        tx_start_bd->bd_flags.as_bitfield |=
            (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
    } else {
        /* vf tx, start bd must hold the ethertype for fw to enforce it */
        if (IS_VF(sc)) {
            /* map ethernet header to find type and header length */
            eh = mtod(m0, struct ether_vlan_header *);
            tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
        } else {
            /* used by FW for packet accounting */
            tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
#if 0
            /*
             * If NPAR-SD is active then FW should do the tagging regardless
             * of value of priority. Otherwise, if priority indicates this is
             * a control packet we need to indicate to FW to avoid tagging.
             */
            if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) {
                SET_FLAG(tx_start_bd->general_data,
                         ETH_TX_START_BD_FORCE_VLAN_MODE, 1);
            }
#endif
        }
    }

    /*
     * add a parsing BD from the chain. The parsing BD is always added
     * though it is only used for TSO and chksum
     */
    bd_prod = TX_BD_NEXT(bd_prod);

    /* translate mbuf checksum-offload requests into start-BD flag bits */
    if (m0->m_pkthdr.csum_flags) {
        if (m0->m_pkthdr.csum_flags & CSUM_IP) {
            fp->eth_q_stats.tx_ofld_frames_csum_ip++;
            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
        }

        if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
                                                  ETH_TX_BD_FLAGS_L4_CSUM);
        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
                                                  ETH_TX_BD_FLAGS_IS_UDP |
                                                  ETH_TX_BD_FLAGS_L4_CSUM);
        } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
                   (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
            tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
        } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
            tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
                                                  ETH_TX_BD_FLAGS_IS_UDP);
        }
    }

    /* fill the chip-specific parsing BD (E2+ vs. E1x layouts differ) */
    if (!CHIP_IS_E1x(sc)) {
        pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
        memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));

        if (m0->m_pkthdr.csum_flags) {
            hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data);
        }

#if 0
        /*
         * Add the MACs to the parsing BD if the module param was
         * explicitly set, if this is a vf, or in switch independent
         * mode.
         */
        if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) {
            eh = mtod(m0, struct ether_vlan_header *);
            bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
                                &pbd_e2->data.mac_addr.src_mid,
                                &pbd_e2->data.mac_addr.src_lo,
                                eh->evl_shost);
            bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
                                &pbd_e2->data.mac_addr.dst_mid,
                                &pbd_e2->data.mac_addr.dst_lo,
                                eh->evl_dhost);
        }
#endif

        SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE,
                 mac_type);
    } else {
        uint16_t global_data = 0;

        pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
        memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));

        if (m0->m_pkthdr.csum_flags) {
            hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x);
        }

        SET_FLAG(global_data,
                 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
        pbd_e1x->global_data |= htole16(global_data);
    }

    /* setup the parsing BD with TSO specific info */
    if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
        fp->eth_q_stats.tx_ofld_frames_lso++;
        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

        if (__predict_false(tx_start_bd->nbytes > hlen)) {
            fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;

            /* split the first BD into header/data making the fw job easy */
            nbds++;
            tx_start_bd->nbd = htole16(nbds);
            tx_start_bd->nbytes = htole16(hlen);

            bd_prod = TX_BD_NEXT(bd_prod);

            /* new transmit BD after the tx_parse_bd */
            tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
            tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
            tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
            tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
            if (tx_total_pkt_size_bd == NULL) {
                tx_total_pkt_size_bd = tx_data_bd;
            }

            BLOGD(sc, DBG_TX,
                  "TSO split header size is %d (%x:%x) nbds %d\n",
                  le16toh(tx_start_bd->nbytes),
                  le32toh(tx_start_bd->addr_hi),
                  le32toh(tx_start_bd->addr_lo),
                  nbds);
        }

        if (!CHIP_IS_E1x(sc)) {
            bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data);
        } else {
            bxe_set_pbd_lso(m0, pbd_e1x);
        }
    }

    if (pbd_e2_parsing_data) {
        pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
    }

    /* prepare remaining BDs, start tx bd contains first seg/frag */
    for (i = 1; i < nsegs ; i++) {
        bd_prod = TX_BD_NEXT(bd_prod);
        tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
        tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
        tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
        tx_data_bd->nbytes = htole16(segs[i].ds_len);
        if (tx_total_pkt_size_bd == NULL) {
            tx_total_pkt_size_bd = tx_data_bd;
        }
        total_pkt_size += tx_data_bd->nbytes;
    }

    BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd);

    /* the first non-parsing data BD carries the total packet length */
    if (tx_total_pkt_size_bd != NULL) {
        tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
    }

    /* debug-only: walk the BD chain we just built and log every entry */
    if (__predict_false(sc->debug & DBG_TX)) {
        tmp_bd = tx_buf->first_bd;
        for (i = 0; i < nbds; i++)
        {
            if (i == 0) {
                BLOGD(sc, DBG_TX,
                      "TX Strt: %p bd=%d nbd=%d vlan=0x%x "
                      "bd_flags=0x%x hdr_nbds=%d\n",
                      tx_start_bd,
                      tmp_bd,
                      le16toh(tx_start_bd->nbd),
                      le16toh(tx_start_bd->vlan_or_ethertype),
                      tx_start_bd->bd_flags.as_bitfield,
                      (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
            } else if (i == 1) {
                if (pbd_e1x) {
                    BLOGD(sc, DBG_TX,
                          "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
                          "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x "
                          "tcp_seq=%u total_hlen_w=%u\n",
                          pbd_e1x,
                          tmp_bd,
                          pbd_e1x->global_data,
                          pbd_e1x->ip_hlen_w,
                          pbd_e1x->ip_id,
                          pbd_e1x->lso_mss,
                          pbd_e1x->tcp_flags,
                          pbd_e1x->tcp_pseudo_csum,
                          pbd_e1x->tcp_send_seq,
                          le16toh(pbd_e1x->total_hlen_w));
                } else { /* if (pbd_e2) */
                    BLOGD(sc, DBG_TX,
                          "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
                          "src=%02x:%02x:%02x parsing_data=0x%x\n",
                          pbd_e2,
                          tmp_bd,
                          pbd_e2->data.mac_addr.dst_hi,
                          pbd_e2->data.mac_addr.dst_mid,
                          pbd_e2->data.mac_addr.dst_lo,
                          pbd_e2->data.mac_addr.src_hi,
                          pbd_e2->data.mac_addr.src_mid,
                          pbd_e2->data.mac_addr.src_lo,
                          pbd_e2->parsing_data);
                }
            }

            if (i != 1) { /* skip parse db as it doesn't hold data */
                tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
                BLOGD(sc, DBG_TX,
                      "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
                      tx_data_bd,
                      tmp_bd,
                      le16toh(tx_data_bd->nbytes),
                      le32toh(tx_data_bd->addr_hi),
                      le32toh(tx_data_bd->addr_lo));
            }

            tmp_bd = TX_BD_NEXT(tmp_bd);
        }
    }

    BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod);

    /* update TX BD producer index value for next TX */
    bd_prod = TX_BD_NEXT(bd_prod);

    /*
     * If the chain of tx_bd's describing this frame is adjacent to or spans
     * an eth_tx_next_bd element then we need to increment the nbds value.
     */
    if (TX_BD_IDX(bd_prod) < nbds) {
        nbds++;
    }

    /* don't allow reordering of writes for nbd and packets */
    mb();

    fp->tx_db.data.prod += nbds;

    /* producer points to the next free tx_bd at this point */
    fp->tx_pkt_prod++;
    fp->tx_bd_prod = bd_prod;

    /* ring the doorbell to tell the chip new BDs are posted */
    DOORBELL(sc, fp->index, fp->tx_db.raw);

    fp->eth_q_stats.tx_pkts++;

    /* Prevent speculative reads from getting ahead of the status block. */
    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
                      0, 0, BUS_SPACE_BARRIER_READ);

    /* Prevent speculative reads from getting ahead of the doorbell. */
    bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
                      0, 0, BUS_SPACE_BARRIER_READ);

    return (0);
}
| 5836 |
|
| 5837 |
static void |
| 5838 |
bxe_tx_start_locked(struct bxe_softc *sc, |
| 5839 |
if_t ifp, |
| 5840 |
struct bxe_fastpath *fp) |
| 5841 |
{ |
| 5842 |
struct mbuf *m = NULL; |
| 5843 |
int tx_count = 0; |
| 5844 |
uint16_t tx_bd_avail; |
| 5845 |
|
| 5846 |
BXE_FP_TX_LOCK_ASSERT(fp); |
| 5847 |
|
| 5848 |
/* keep adding entries while there are frames to send */ |
| 5849 |
while (!if_sendq_empty(ifp)) { |
| 5850 |
|
| 5851 |
/* |
| 5852 |
* check for any frames to send |
| 5853 |
* dequeue can still be NULL even if queue is not empty |
| 5854 |
*/ |
| 5855 |
m = if_dequeue(ifp); |
| 5856 |
if (__predict_false(m == NULL)) { |
| 5857 |
break; |
| 5858 |
} |
| 5859 |
|
| 5860 |
/* the mbuf now belongs to us */ |
| 5861 |
fp->eth_q_stats.mbuf_alloc_tx++; |
| 5862 |
|
| 5863 |
/* |
| 5864 |
* Put the frame into the transmit ring. If we don't have room, |
| 5865 |
* place the mbuf back at the head of the TX queue, set the |
| 5866 |
* OACTIVE flag, and wait for the NIC to drain the chain. |
| 5867 |
*/ |
| 5868 |
if (__predict_false(bxe_tx_encap(fp, &m))) { |
| 5869 |
fp->eth_q_stats.tx_encap_failures++; |
| 5870 |
if (m != NULL) { |
| 5871 |
/* mark the TX queue as full and return the frame */ |
| 5872 |
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); |
| 5873 |
if_sendq_prepend(ifp, m); |
| 5874 |
fp->eth_q_stats.mbuf_alloc_tx--; |
| 5875 |
fp->eth_q_stats.tx_queue_xoff++; |
| 5876 |
} |
| 5877 |
|
| 5878 |
/* stop looking for more work */ |
| 5879 |
break; |
| 5880 |
} |
| 5881 |
|
| 5882 |
/* the frame was enqueued successfully */ |
| 5883 |
tx_count++; |
| 5884 |
|
| 5885 |
/* send a copy of the frame to any BPF listeners. */ |
| 5886 |
if_etherbpfmtap(ifp, m); |
| 5887 |
|
| 5888 |
tx_bd_avail = bxe_tx_avail(sc, fp); |
| 5889 |
|
| 5890 |
/* handle any completions if we're running low */ |
| 5891 |
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { |
| 5892 |
/* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ |
| 5893 |
bxe_txeof(sc, fp); |
| 5894 |
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { |
| 5895 |
break; |
| 5896 |
} |
| 5897 |
} |
| 5898 |
} |
| 5899 |
|
| 5900 |
/* all TX packets were dequeued and/or the tx ring is full */ |
| 5901 |
if (tx_count > 0) { |
| 5902 |
/* reset the TX watchdog timeout timer */ |
| 5903 |
fp->watchdog_timer = BXE_TX_TIMEOUT; |
| 5904 |
} |
| 5905 |
} |
| 5906 |
|
| 5907 |
/* Legacy (non-RSS) dispatch routine */ |
| 5908 |
static void |
| 5909 |
bxe_tx_start(if_t ifp) |
| 5910 |
{ |
| 5911 |
struct bxe_softc *sc; |
| 5912 |
struct bxe_fastpath *fp; |
| 5913 |
|
| 5914 |
sc = if_getsoftc(ifp); |
| 5915 |
|
| 5916 |
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { |
| 5917 |
BLOGW(sc, "Interface not running, ignoring transmit request\n"); |
| 5918 |
return; |
| 5919 |
} |
| 5920 |
|
| 5921 |
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { |
| 5922 |
BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); |
| 5923 |
return; |
| 5924 |
} |
| 5925 |
|
| 5926 |
if (!sc->link_vars.link_up) { |
| 5927 |
BLOGW(sc, "Interface link is down, ignoring transmit request\n"); |
| 5928 |
return; |
| 5929 |
} |
| 5930 |
|
| 5931 |
fp = &sc->fp[0]; |
| 5932 |
|
| 5933 |
BXE_FP_TX_LOCK(fp); |
| 5934 |
bxe_tx_start_locked(sc, ifp, fp); |
| 5935 |
BXE_FP_TX_UNLOCK(fp); |
| 5936 |
} |
| 5937 |
|
| 5938 |
#if __FreeBSD_version >= 800000 |
| 5939 |
|
| 5940 |
static int |
| 5941 |
bxe_tx_mq_start_locked(struct bxe_softc *sc, |
| 5942 |
if_t ifp, |
| 5943 |
struct bxe_fastpath *fp, |
| 5944 |
struct mbuf *m) |
| 5945 |
{ |
| 5946 |
struct buf_ring *tx_br = fp->tx_br; |
| 5947 |
struct mbuf *next; |
| 5948 |
int depth, rc, tx_count; |
| 5949 |
uint16_t tx_bd_avail; |
| 5950 |
|
| 5951 |
rc = tx_count = 0; |
| 5952 |
|
| 5953 |
if (!tx_br) { |
| 5954 |
BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); |
| 5955 |
return (EINVAL); |
| 5956 |
} |
| 5957 |
|
| 5958 |
/* fetch the depth of the driver queue */ |
| 5959 |
depth = drbr_inuse_drv(ifp, tx_br); |
| 5960 |
if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { |
| 5961 |
fp->eth_q_stats.tx_max_drbr_queue_depth = depth; |
| 5962 |
} |
| 5963 |
|
| 5964 |
BXE_FP_TX_LOCK_ASSERT(fp); |
| 5965 |
|
| 5966 |
if (m == NULL) { |
| 5967 |
/* no new work, check for pending frames */ |
| 5968 |
next = drbr_dequeue_drv(ifp, tx_br); |
| 5969 |
} else if (drbr_needs_enqueue_drv(ifp, tx_br)) { |
| 5970 |
/* have both new and pending work, maintain packet order */ |
| 5971 |
rc = drbr_enqueue_drv(ifp, tx_br, m); |
| 5972 |
if (rc != 0) { |
| 5973 |
fp->eth_q_stats.tx_soft_errors++; |
| 5974 |
goto bxe_tx_mq_start_locked_exit; |
| 5975 |
} |
| 5976 |
next = drbr_dequeue_drv(ifp, tx_br); |
| 5977 |
} else { |
| 5978 |
/* new work only and nothing pending */ |
| 5979 |
next = m; |
| 5980 |
} |
| 5981 |
|
| 5982 |
/* keep adding entries while there are frames to send */ |
| 5983 |
while (next != NULL) { |
| 5984 |
|
| 5985 |
/* the mbuf now belongs to us */ |
| 5986 |
fp->eth_q_stats.mbuf_alloc_tx++; |
| 5987 |
|
| 5988 |
/* |
| 5989 |
* Put the frame into the transmit ring. If we don't have room, |
| 5990 |
* place the mbuf back at the head of the TX queue, set the |
| 5991 |
* OACTIVE flag, and wait for the NIC to drain the chain. |
| 5992 |
*/ |
| 5993 |
rc = bxe_tx_encap(fp, &next); |
| 5994 |
if (__predict_false(rc != 0)) { |
| 5995 |
fp->eth_q_stats.tx_encap_failures++; |
| 5996 |
if (next != NULL) { |
| 5997 |
/* mark the TX queue as full and save the frame */ |
| 5998 |
if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); |
| 5999 |
/* XXX this may reorder the frame */ |
| 6000 |
rc = drbr_enqueue_drv(ifp, tx_br, next); |
| 6001 |
fp->eth_q_stats.mbuf_alloc_tx--; |
| 6002 |
fp->eth_q_stats.tx_frames_deferred++; |
| 6003 |
} |
| 6004 |
|
| 6005 |
/* stop looking for more work */ |
| 6006 |
break; |
| 6007 |
} |
| 6008 |
|
| 6009 |
/* the transmit frame was enqueued successfully */ |
| 6010 |
tx_count++; |
| 6011 |
|
| 6012 |
/* send a copy of the frame to any BPF listeners */ |
| 6013 |
if_etherbpfmtap(ifp, next); |
| 6014 |
|
| 6015 |
tx_bd_avail = bxe_tx_avail(sc, fp); |
| 6016 |
|
| 6017 |
/* handle any completions if we're running low */ |
| 6018 |
if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { |
| 6019 |
/* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ |
| 6020 |
bxe_txeof(sc, fp); |
| 6021 |
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { |
| 6022 |
break; |
| 6023 |
} |
| 6024 |
} |
| 6025 |
|
| 6026 |
next = drbr_dequeue_drv(ifp, tx_br); |
| 6027 |
} |
| 6028 |
|
| 6029 |
/* all TX packets were dequeued and/or the tx ring is full */ |
| 6030 |
if (tx_count > 0) { |
| 6031 |
/* reset the TX watchdog timeout timer */ |
| 6032 |
fp->watchdog_timer = BXE_TX_TIMEOUT; |
| 6033 |
} |
| 6034 |
|
| 6035 |
bxe_tx_mq_start_locked_exit: |
| 6036 |
|
| 6037 |
return (rc); |
| 6038 |
} |
| 6039 |
|
| 6040 |
/* Multiqueue (TSS) dispatch routine. */ |
| 6041 |
static int |
| 6042 |
bxe_tx_mq_start(struct ifnet *ifp, |
| 6043 |
struct mbuf *m) |
| 6044 |
{ |
| 6045 |
struct bxe_softc *sc = if_getsoftc(ifp); |
| 6046 |
struct bxe_fastpath *fp; |
| 6047 |
int fp_index, rc; |
| 6048 |
|
| 6049 |
fp_index = 0; /* default is the first queue */ |
| 6050 |
|
| 6051 |
/* change the queue if using flow ID */ |
| 6052 |
if ((m->m_flags & M_FLOWID) != 0) { |
| 6053 |
fp_index = (m->m_pkthdr.flowid % sc->num_queues); |
| 6054 |
} |
| 6055 |
|
| 6056 |
fp = &sc->fp[fp_index]; |
| 6057 |
|
| 6058 |
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { |
| 6059 |
BLOGW(sc, "Interface not running, ignoring transmit request\n"); |
| 6060 |
return (ENETDOWN); |
| 6061 |
} |
| 6062 |
|
| 6063 |
if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { |
| 6064 |
BLOGW(sc, "Interface TX queue is full, ignoring transmit request\n"); |
| 6065 |
return (EBUSY); |
| 6066 |
} |
| 6067 |
|
| 6068 |
if (!sc->link_vars.link_up) { |
| 6069 |
BLOGW(sc, "Interface link is down, ignoring transmit request\n"); |
| 6070 |
return (ENETDOWN); |
| 6071 |
} |
| 6072 |
|
| 6073 |
/* XXX change to TRYLOCK here and if failed then schedule taskqueue */ |
| 6074 |
|
| 6075 |
BXE_FP_TX_LOCK(fp); |
| 6076 |
rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); |
| 6077 |
BXE_FP_TX_UNLOCK(fp); |
| 6078 |
|
| 6079 |
return (rc); |
| 6080 |
} |
| 6081 |
|
| 6082 |
static void |
| 6083 |
bxe_mq_flush(struct ifnet *ifp) |
| 6084 |
{ |
| 6085 |
struct bxe_softc *sc = if_getsoftc(ifp); |
| 6086 |
struct bxe_fastpath *fp; |
| 6087 |
struct mbuf *m; |
| 6088 |
int i; |
| 6089 |
|
| 6090 |
for (i = 0; i < sc->num_queues; i++) { |
| 6091 |
fp = &sc->fp[i]; |
| 6092 |
|
| 6093 |
if (fp->state != BXE_FP_STATE_OPEN) { |
| 6094 |
BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", |
| 6095 |
fp->index, fp->state); |
| 6096 |
continue; |
| 6097 |
} |
| 6098 |
|
| 6099 |
if (fp->tx_br != NULL) { |
| 6100 |
BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); |
| 6101 |
BXE_FP_TX_LOCK(fp); |
| 6102 |
while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { |
| 6103 |
m_freem(m); |
| 6104 |
} |
| 6105 |
BXE_FP_TX_UNLOCK(fp); |
| 6106 |
} |
| 6107 |
} |
| 6108 |
|
| 6109 |
if_qflush_drv(ifp); |
| 6110 |
} |
| 6111 |
|
| 6112 |
#endif /* FreeBSD_version >= 800000 */ |
| 6113 |
|
| 6114 |
static uint16_t |
| 6115 |
bxe_cid_ilt_lines(struct bxe_softc *sc) |
| 6116 |
{ |
| 6117 |
if (IS_SRIOV(sc)) { |
| 6118 |
return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); |
| 6119 |
} |
| 6120 |
return (L2_ILT_LINES(sc)); |
| 6121 |
} |
| 6122 |
|
| 6123 |
/*
 * Lay out this function's ILT (Internal Lookup Table) address space:
 * assign each ILT client (CDU, QM, SRC, TM) a contiguous range of lines
 * starting at the function's base line. 'line' accumulates the running
 * offset as each client is placed.
 */
static void
bxe_ilt_set_info(struct bxe_softc *sc)
{
    struct ilt_client_info *ilt_client;
    struct ecore_ilt *ilt = sc->ilt;
    uint16_t line = 0;

    ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
    BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);

    /* CDU — context data unit; memory is managed elsewhere (SKIP_MEM) */
    ilt_client = &ilt->clients[ILT_CLIENT_CDU];
    ilt_client->client_num = ILT_CLIENT_CDU;
    ilt_client->page_size = CDU_ILT_PAGE_SZ;
    ilt_client->flags = ILT_CLIENT_SKIP_MEM;
    ilt_client->start = line;
    line += bxe_cid_ilt_lines(sc);

    if (CNIC_SUPPORT(sc)) {
        line += CNIC_ILT_LINES;
    }

    ilt_client->end = (line - 1);

    BLOGD(sc, DBG_LOAD,
          "ilt client[CDU]: start %d, end %d, "
          "psz 0x%x, flags 0x%x, hw psz %d\n",
          ilt_client->start, ilt_client->end,
          ilt_client->page_size,
          ilt_client->flags,
          ilog2(ilt_client->page_size >> 12));

    /* QM — queue manager; only placed when QM init is required */
    if (QM_INIT(sc->qm_cid_count)) {
        ilt_client = &ilt->clients[ILT_CLIENT_QM];
        ilt_client->client_num = ILT_CLIENT_QM;
        ilt_client->page_size = QM_ILT_PAGE_SZ;
        ilt_client->flags = 0;
        ilt_client->start = line;

        /* 4 bytes for each cid */
        line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
                             QM_ILT_PAGE_SZ);

        ilt_client->end = (line - 1);

        BLOGD(sc, DBG_LOAD,
              "ilt client[QM]: start %d, end %d, "
              "psz 0x%x, flags 0x%x, hw psz %d\n",
              ilt_client->start, ilt_client->end,
              ilt_client->page_size, ilt_client->flags,
              ilog2(ilt_client->page_size >> 12));
    }

    /* SRC and TM clients are only needed when CNIC (offload) is present */
    if (CNIC_SUPPORT(sc)) {
        /* SRC */
        ilt_client = &ilt->clients[ILT_CLIENT_SRC];
        ilt_client->client_num = ILT_CLIENT_SRC;
        ilt_client->page_size = SRC_ILT_PAGE_SZ;
        ilt_client->flags = 0;
        ilt_client->start = line;
        line += SRC_ILT_LINES;
        ilt_client->end = (line - 1);

        BLOGD(sc, DBG_LOAD,
              "ilt client[SRC]: start %d, end %d, "
              "psz 0x%x, flags 0x%x, hw psz %d\n",
              ilt_client->start, ilt_client->end,
              ilt_client->page_size, ilt_client->flags,
              ilog2(ilt_client->page_size >> 12));

        /* TM */
        ilt_client = &ilt->clients[ILT_CLIENT_TM];
        ilt_client->client_num = ILT_CLIENT_TM;
        ilt_client->page_size = TM_ILT_PAGE_SZ;
        ilt_client->flags = 0;
        ilt_client->start = line;
        line += TM_ILT_LINES;
        ilt_client->end = (line - 1);

        BLOGD(sc, DBG_LOAD,
              "ilt client[TM]: start %d, end %d, "
              "psz 0x%x, flags 0x%x, hw psz %d\n",
              ilt_client->start, ilt_client->end,
              ilt_client->page_size, ilt_client->flags,
              ilog2(ilt_client->page_size >> 12));
    }

    /* the layout must fit inside the hardware's ILT */
    KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!"));
}
| 6213 |
|
| 6214 |
static void |
| 6215 |
bxe_set_fp_rx_buf_size(struct bxe_softc *sc) |
| 6216 |
{ |
| 6217 |
int i; |
| 6218 |
|
| 6219 |
BLOGD(sc, DBG_LOAD, "mtu = %d\n", sc->mtu); |
| 6220 |
|
| 6221 |
for (i = 0; i < sc->num_queues; i++) { |
| 6222 |
/* get the Rx buffer size for RX frames */ |
| 6223 |
sc->fp[i].rx_buf_size = |
| 6224 |
(IP_HEADER_ALIGNMENT_PADDING + |
| 6225 |
ETH_OVERHEAD + |
| 6226 |
sc->mtu); |
| 6227 |
|
| 6228 |
BLOGD(sc, DBG_LOAD, "rx_buf_size for fp[%02d] = %d\n", |
| 6229 |
i, sc->fp[i].rx_buf_size); |
| 6230 |
|
| 6231 |
/* get the mbuf allocation size for RX frames */ |
| 6232 |
if (sc->fp[i].rx_buf_size <= MCLBYTES) { |
| 6233 |
sc->fp[i].mbuf_alloc_size = MCLBYTES; |
| 6234 |
} else if (sc->fp[i].rx_buf_size <= BCM_PAGE_SIZE) { |
| 6235 |
sc->fp[i].mbuf_alloc_size = PAGE_SIZE; |
| 6236 |
} else { |
| 6237 |
sc->fp[i].mbuf_alloc_size = MJUM9BYTES; |
| 6238 |
} |
| 6239 |
|
| 6240 |
BLOGD(sc, DBG_LOAD, "mbuf_alloc_size for fp[%02d] = %d\n", |
| 6241 |
i, sc->fp[i].mbuf_alloc_size); |
| 6242 |
} |
| 6243 |
} |
| 6244 |
|
| 6245 |
static int |
| 6246 |
bxe_alloc_ilt_mem(struct bxe_softc *sc) |
| 6247 |
{ |
| 6248 |
int rc = 0; |
| 6249 |
|
| 6250 |
if ((sc->ilt = |
| 6251 |
(struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), |
| 6252 |
M_BXE_ILT, |
| 6253 |
(M_NOWAIT | M_ZERO))) == NULL) { |
| 6254 |
rc = 1; |
| 6255 |
} |
| 6256 |
|
| 6257 |
return (rc); |
| 6258 |
} |
| 6259 |
|
| 6260 |
static int |
| 6261 |
bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) |
| 6262 |
{ |
| 6263 |
int rc = 0; |
| 6264 |
|
| 6265 |
if ((sc->ilt->lines = |
| 6266 |
(struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), |
| 6267 |
M_BXE_ILT, |
| 6268 |
(M_NOWAIT | M_ZERO))) == NULL) { |
| 6269 |
rc = 1; |
| 6270 |
} |
| 6271 |
|
| 6272 |
return (rc); |
| 6273 |
} |
| 6274 |
|
| 6275 |
static void |
| 6276 |
bxe_free_ilt_mem(struct bxe_softc *sc) |
| 6277 |
{ |
| 6278 |
if (sc->ilt != NULL) { |
| 6279 |
free(sc->ilt, M_BXE_ILT); |
| 6280 |
sc->ilt = NULL; |
| 6281 |
} |
| 6282 |
} |
| 6283 |
|
| 6284 |
static void |
| 6285 |
bxe_free_ilt_lines_mem(struct bxe_softc *sc) |
| 6286 |
{ |
| 6287 |
if (sc->ilt->lines != NULL) { |
| 6288 |
free(sc->ilt->lines, M_BXE_ILT); |
| 6289 |
sc->ilt->lines = NULL; |
| 6290 |
} |
| 6291 |
} |
| 6292 |
|
| 6293 |
/*
 * Free the memory allocated by bxe_alloc_mem(): the per-line CDU context
 * DMA buffers, the ILT client memory, and the ILT line array. Order
 * matters: the context buffers are released before the ILT bookkeeping
 * that refers to them.
 */
static void
bxe_free_mem(struct bxe_softc *sc)
{
    int i;

#if 0
    if (!CONFIGURE_NIC_MODE(sc)) {
        /* free searcher T2 table */
        bxe_dma_free(sc, &sc->t2);
    }
#endif

    /* release each CDU context DMA buffer and clear its bookkeeping */
    for (i = 0; i < L2_ILT_LINES(sc); i++) {
        bxe_dma_free(sc, &sc->context[i].vcxt_dma);
        sc->context[i].vcxt = NULL;
        sc->context[i].size = 0;
    }

    /* release memory owned by the ILT clients */
    ecore_ilt_mem_op(sc, ILT_MEMOP_FREE);

    bxe_free_ilt_lines_mem(sc);

#if 0
    bxe_iov_free_mem(sc);
#endif
}
| 6319 |
|
| 6320 |
static int |
| 6321 |
bxe_alloc_mem(struct bxe_softc *sc) |
| 6322 |
{ |
| 6323 |
int context_size; |
| 6324 |
int allocated; |
| 6325 |
int i; |
| 6326 |
|
| 6327 |
#if 0 |
| 6328 |
if (!CONFIGURE_NIC_MODE(sc)) { |
| 6329 |
/* allocate searcher T2 table */ |
| 6330 |
if (bxe_dma_alloc(sc, SRC_T2_SZ, |
| 6331 |
&sc->t2, "searcher t2 table") != 0) { |
| 6332 |
return (-1); |
| 6333 |
} |
| 6334 |
} |
| 6335 |
#endif |
| 6336 |
|
| 6337 |
/* |
| 6338 |
* Allocate memory for CDU context: |
| 6339 |
* This memory is allocated separately and not in the generic ILT |
| 6340 |
* functions because CDU differs in few aspects: |
| 6341 |
* 1. There can be multiple entities allocating memory for context - |
| 6342 |
* regular L2, CNIC, and SRIOV drivers. Each separately controls |
| 6343 |
* its own ILT lines. |
| 6344 |
* 2. Since CDU page-size is not a single 4KB page (which is the case |
| 6345 |
* for the other ILT clients), to be efficient we want to support |
| 6346 |
* allocation of sub-page-size in the last entry. |
| 6347 |
* 3. Context pointers are used by the driver to pass to FW / update |
| 6348 |
* the context (for the other ILT clients the pointers are used just to |
| 6349 |
* free the memory during unload). |
| 6350 |
*/ |
| 6351 |
context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); |
| 6352 |
for (i = 0, allocated = 0; allocated < context_size; i++) { |
| 6353 |
sc->context[i].size = min(CDU_ILT_PAGE_SZ, |
| 6354 |
(context_size - allocated)); |
| 6355 |
|
| 6356 |
if (bxe_dma_alloc(sc, sc->context[i].size, |
| 6357 |
&sc->context[i].vcxt_dma, |
| 6358 |
"cdu context") != 0) { |
| 6359 |
bxe_free_mem(sc); |
| 6360 |
return (-1); |
| 6361 |
} |
| 6362 |
|
| 6363 |
sc->context[i].vcxt = |
| 6364 |
(union cdu_context *)sc->context[i].vcxt_dma.vaddr; |
| 6365 |
|
| 6366 |
allocated += sc->context[i].size; |
| 6367 |
} |
| 6368 |
|
| 6369 |
bxe_alloc_ilt_lines_mem(sc); |
| 6370 |
|
| 6371 |
BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", |
| 6372 |
sc->ilt, sc->ilt->start_line, sc->ilt->lines); |
| 6373 |
{ |
| 6374 |
for (i = 0; i < 4; i++) { |
| 6375 |
BLOGD(sc, DBG_LOAD, |
| 6376 |
"c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", |
| 6377 |
i, |
| 6378 |
sc->ilt->clients[i].page_size, |
| 6379 |
sc->ilt->clients[i].start, |
| 6380 |
sc->ilt->clients[i].end, |
| 6381 |
sc->ilt->clients[i].client_num, |
| 6382 |
sc->ilt->clients[i].flags); |
| 6383 |
} |
| 6384 |
} |
| 6385 |
if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { |
| 6386 |
BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); |
| 6387 |
bxe_free_mem(sc); |
| 6388 |
return (-1); |
| 6389 |
} |
| 6390 |
|
| 6391 |
#if 0 |
| 6392 |
if (bxe_iov_alloc_mem(sc)) { |
| 6393 |
BLOGE(sc, "Failed to allocate memory for SRIOV\n"); |
| 6394 |
bxe_free_mem(sc); |
| 6395 |
return (-1); |
| 6396 |
} |
| 6397 |
#endif |
| 6398 |
|
| 6399 |
return (0); |
| 6400 |
} |
| 6401 |
|
| 6402 |
static void |
| 6403 |
bxe_free_rx_bd_chain(struct bxe_fastpath *fp) |
| 6404 |
{ |
| 6405 |
struct bxe_softc *sc; |
| 6406 |
int i; |
| 6407 |
|
| 6408 |
sc = fp->sc; |
| 6409 |
|
| 6410 |
if (fp->rx_mbuf_tag == NULL) { |
| 6411 |
return; |
| 6412 |
} |
| 6413 |
|
| 6414 |
/* free all mbufs and unload all maps */ |
| 6415 |
for (i = 0; i < RX_BD_TOTAL; i++) { |
| 6416 |
if (fp->rx_mbuf_chain[i].m_map != NULL) { |
| 6417 |
bus_dmamap_sync(fp->rx_mbuf_tag, |
| 6418 |
fp->rx_mbuf_chain[i].m_map, |
| 6419 |
BUS_DMASYNC_POSTREAD); |
| 6420 |
bus_dmamap_unload(fp->rx_mbuf_tag, |
| 6421 |
fp->rx_mbuf_chain[i].m_map); |
| 6422 |
} |
| 6423 |
|
| 6424 |
if (fp->rx_mbuf_chain[i].m != NULL) { |
| 6425 |
m_freem(fp->rx_mbuf_chain[i].m); |
| 6426 |
fp->rx_mbuf_chain[i].m = NULL; |
| 6427 |
fp->eth_q_stats.mbuf_alloc_rx--; |
| 6428 |
} |
| 6429 |
} |
| 6430 |
} |
| 6431 |
|
| 6432 |
static void |
| 6433 |
bxe_free_tpa_pool(struct bxe_fastpath *fp) |
| 6434 |
{ |
| 6435 |
struct bxe_softc *sc; |
| 6436 |
int i, max_agg_queues; |
| 6437 |
|
| 6438 |
sc = fp->sc; |
| 6439 |
|
| 6440 |
if (fp->rx_mbuf_tag == NULL) { |
| 6441 |
return; |
| 6442 |
} |
| 6443 |
|
| 6444 |
max_agg_queues = MAX_AGG_QS(sc); |
| 6445 |
|
| 6446 |
/* release all mbufs and unload all DMA maps in the TPA pool */ |
| 6447 |
for (i = 0; i < max_agg_queues; i++) { |
| 6448 |
if (fp->rx_tpa_info[i].bd.m_map != NULL) { |
| 6449 |
bus_dmamap_sync(fp->rx_mbuf_tag, |
| 6450 |
fp->rx_tpa_info[i].bd.m_map, |
| 6451 |
BUS_DMASYNC_POSTREAD); |
| 6452 |
bus_dmamap_unload(fp->rx_mbuf_tag, |
| 6453 |
fp->rx_tpa_info[i].bd.m_map); |
| 6454 |
} |
| 6455 |
|
| 6456 |
if (fp->rx_tpa_info[i].bd.m != NULL) { |
| 6457 |
m_freem(fp->rx_tpa_info[i].bd.m); |
| 6458 |
fp->rx_tpa_info[i].bd.m = NULL; |
| 6459 |
fp->eth_q_stats.mbuf_alloc_tpa--; |
| 6460 |
} |
| 6461 |
} |
| 6462 |
} |
| 6463 |
|
| 6464 |
static void |
| 6465 |
bxe_free_sge_chain(struct bxe_fastpath *fp) |
| 6466 |
{ |
| 6467 |
struct bxe_softc *sc; |
| 6468 |
int i; |
| 6469 |
|
| 6470 |
sc = fp->sc; |
| 6471 |
|
| 6472 |
if (fp->rx_sge_mbuf_tag == NULL) { |
| 6473 |
return; |
| 6474 |
} |
| 6475 |
|
| 6476 |
/* rree all mbufs and unload all maps */ |
| 6477 |
for (i = 0; i < RX_SGE_TOTAL; i++) { |
| 6478 |
if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { |
| 6479 |
bus_dmamap_sync(fp->rx_sge_mbuf_tag, |
| 6480 |
fp->rx_sge_mbuf_chain[i].m_map, |
| 6481 |
BUS_DMASYNC_POSTREAD); |
| 6482 |
bus_dmamap_unload(fp->rx_sge_mbuf_tag, |
| 6483 |
fp->rx_sge_mbuf_chain[i].m_map); |
| 6484 |
} |
| 6485 |
|
| 6486 |
if (fp->rx_sge_mbuf_chain[i].m != NULL) { |
| 6487 |
m_freem(fp->rx_sge_mbuf_chain[i].m); |
| 6488 |
fp->rx_sge_mbuf_chain[i].m = NULL; |
| 6489 |
fp->eth_q_stats.mbuf_alloc_sge--; |
| 6490 |
} |
| 6491 |
} |
| 6492 |
} |
| 6493 |
|
| 6494 |
static void |
| 6495 |
bxe_free_fp_buffers(struct bxe_softc *sc) |
| 6496 |
{ |
| 6497 |
struct bxe_fastpath *fp; |
| 6498 |
int i; |
| 6499 |
|
| 6500 |
for (i = 0; i < sc->num_queues; i++) { |
| 6501 |
fp = &sc->fp[i]; |
| 6502 |
|
| 6503 |
#if __FreeBSD_version >= 800000 |
| 6504 |
if (fp->tx_br != NULL) { |
| 6505 |
struct mbuf *m; |
| 6506 |
/* just in case bxe_mq_flush() wasn't called */ |
| 6507 |
while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { |
| 6508 |
m_freem(m); |
| 6509 |
} |
| 6510 |
buf_ring_free(fp->tx_br, M_DEVBUF); |
| 6511 |
fp->tx_br = NULL; |
| 6512 |
} |
| 6513 |
#endif |
| 6514 |
|
| 6515 |
/* free all RX buffers */ |
| 6516 |
bxe_free_rx_bd_chain(fp); |
| 6517 |
bxe_free_tpa_pool(fp); |
| 6518 |
bxe_free_sge_chain(fp); |
| 6519 |
|
| 6520 |
if (fp->eth_q_stats.mbuf_alloc_rx != 0) { |
| 6521 |
BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", |
| 6522 |
fp->eth_q_stats.mbuf_alloc_rx); |
| 6523 |
} |
| 6524 |
|
| 6525 |
if (fp->eth_q_stats.mbuf_alloc_sge != 0) { |
| 6526 |
BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", |
| 6527 |
fp->eth_q_stats.mbuf_alloc_sge); |
| 6528 |
} |
| 6529 |
|
| 6530 |
if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { |
| 6531 |
BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", |
| 6532 |
fp->eth_q_stats.mbuf_alloc_tpa); |
| 6533 |
} |
| 6534 |
|
| 6535 |
if (fp->eth_q_stats.mbuf_alloc_tx != 0) { |
| 6536 |
BLOGE(sc, "failed to release tx mbufs (%d left)\n", |
| 6537 |
fp->eth_q_stats.mbuf_alloc_tx); |
| 6538 |
} |
| 6539 |
|
| 6540 |
/* XXX verify all mbufs were reclaimed */ |
| 6541 |
|
| 6542 |
if (mtx_initialized(&fp->tx_mtx)) { |
| 6543 |
mtx_destroy(&fp->tx_mtx); |
| 6544 |
} |
| 6545 |
|
| 6546 |
if (mtx_initialized(&fp->rx_mtx)) { |
| 6547 |
mtx_destroy(&fp->rx_mtx); |
| 6548 |
} |
| 6549 |
} |
| 6550 |
} |
| 6551 |
|
| 6552 |
/*
 * Allocate and DMA-map a fresh mbuf for RX BD slot 'index', recycling
 * the DMA map of the slot being replaced via the per-queue spare map.
 * 'prev_index' differs from 'index' when bxe_rxeof() is compacting the
 * chain; in that case the mbuf at prev_index is unmapped and its chain
 * entry NULLed (the caller already holds that mbuf).
 *
 * Returns 0 on success, ENOBUFS / a bus_dma error code on failure. On
 * failure the ring slot is left untouched.
 */
static int
bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp,
                     uint16_t            prev_index,
                     uint16_t            index)
{
    struct bxe_sw_rx_bd *rx_buf;
    struct eth_rx_bd *rx_bd;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs, rc;

    rc = 0;

    /* allocate the new RX BD mbuf */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_rx++;

    /*
     * initialize the mbuf buffer length
     * NOTE(review): the cluster is sized by fp->mbuf_alloc_size but the
     * length is set to fp->rx_buf_size — presumably rx_buf_size <=
     * mbuf_alloc_size; confirm against where those fields are computed.
     */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /* map the mbuf into non-paged pool (using the spare map first) */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
        m_freem(m);
        fp->eth_q_stats.mbuf_alloc_rx--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    /* release any existing RX BD mbuf mappings */

    if (prev_index != index) {
        rx_buf = &fp->rx_mbuf_chain[prev_index];

        if (rx_buf->m_map != NULL) {
            bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                            BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
        }

        /*
         * We only get here from bxe_rxeof() when the maximum number
         * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already
         * holds the mbuf in the prev_index so it's OK to NULL it out
         * here without concern of a memory leak.
         */
        fp->rx_mbuf_chain[prev_index].m = NULL;
    }

    rx_buf = &fp->rx_mbuf_chain[index];

    if (rx_buf->m_map != NULL) {
        bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
    }

    /*
     * save the mbuf and mapping info for a future packet:
     * the just-loaded spare map becomes this slot's map, and the slot's
     * old map (or prev_index's map when compacting) becomes the spare.
     */
    map = (prev_index != index) ?
              fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
    rx_buf->m_map = fp->rx_mbuf_spare_map;
    fp->rx_mbuf_spare_map = map;
    bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
                    BUS_DMASYNC_PREREAD);
    rx_buf->m = m;

    /* publish the new buffer's physical address in the hardware BD */
    rx_bd = &fp->rx_chain[index];
    rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
    rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));

    return (rc);
}
| 6635 |
|
| 6636 |
/*
 * Allocate and DMA-map a fresh mbuf for TPA aggregation queue 'queue',
 * recycling the old DMA map through the per-queue TPA spare map (same
 * swap scheme as bxe_alloc_rx_bd_mbuf). The mapped segment is cached in
 * tpa_info->seg for later BD programming.
 *
 * Returns 0 on success, ENOBUFS / a bus_dma error code on failure.
 */
static int
bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp,
                      int                 queue)
{
    struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs;
    int rc = 0;

    /* allocate the new TPA mbuf */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
        return (ENOBUFS);
    }

    fp->eth_q_stats.mbuf_alloc_tpa++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = fp->rx_buf_size;

    /* map the mbuf into non-paged pool */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
                                 fp->rx_tpa_info_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
        /*
         * NOTE(review): this path uses m_free() where the sibling
         * RX BD / SGE paths use m_freem() — for a single just-allocated
         * mbuf the result is the same, but confirm the asymmetry is
         * intentional.
         */
        m_free(m);
        fp->eth_q_stats.mbuf_alloc_tpa--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    /* release any existing TPA mbuf mapping */
    if (tpa_info->bd.m_map != NULL) {
        bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
    }

    /*
     * save the mbuf and mapping info for the TPA mbuf:
     * swap the freshly-loaded spare map into the slot and keep the
     * slot's old map as the new spare.
     */
    map = tpa_info->bd.m_map;
    tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
    fp->rx_tpa_info_mbuf_spare_map = map;
    bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
                    BUS_DMASYNC_PREREAD);
    tpa_info->bd.m = m;
    tpa_info->seg = segs[0];

    return (rc);
}
| 6691 |
|
| 6692 |
/* |
| 6693 |
* Allocate an mbuf and assign it to the receive scatter gather chain. The |
| 6694 |
* caller must take care to save a copy of the existing mbuf in the SG mbuf |
| 6695 |
* chain. |
| 6696 |
*/ |
| 6697 |
static int
bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp,
                      uint16_t            index)
{
    struct bxe_sw_rx_bd *sge_buf;
    struct eth_rx_sge *sge;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    struct mbuf *m;
    int nsegs;
    int rc = 0;

    /* allocate a new SGE mbuf (one full SGE page) */
    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
    if (__predict_false(m == NULL)) {
        fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
        return (ENOMEM);
    }

    fp->eth_q_stats.mbuf_alloc_sge++;

    /* initialize the mbuf buffer length */
    m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;

    /* map the SGE mbuf into non-paged pool (using the spare map first) */
    rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
                                 fp->rx_sge_mbuf_spare_map,
                                 m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(rc != 0)) {
        fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
        m_freem(m);
        fp->eth_q_stats.mbuf_alloc_sge--;
        return (rc);
    }

    /* all mbufs must map to a single segment */
    KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs));

    sge_buf = &fp->rx_sge_mbuf_chain[index];

    /* release any existing SGE mbuf mapping */
    if (sge_buf->m_map != NULL) {
        bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
    }

    /*
     * save the mbuf and mapping info for a future packet:
     * the freshly-loaded spare map becomes the slot's map and the
     * slot's old map becomes the new spare.
     */
    map = sge_buf->m_map;
    sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
    fp->rx_sge_mbuf_spare_map = map;
    bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
                    BUS_DMASYNC_PREREAD);
    sge_buf->m = m;

    /* publish the buffer's physical address in the hardware SGE */
    sge = &fp->rx_sge_chain[index];
    sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
    sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));

    return (rc);
}
| 6758 |
|
| 6759 |
/*
 * Allocate per-queue buffers for every fastpath: the TX buf_ring, the
 * RX BD mbuf chain, and (when LRO is enabled) the TPA pool and RX SGE
 * chain. Producer/consumer indices are reset as the rings are filled.
 *
 * Returns 0 on success, ENOBUFS on any allocation failure.
 *
 * NOTE(review): the error path only unwinds the fastpath that failed
 * ('fp' from the loop); buffers of previously-completed queues appear
 * to be left for the caller to release (presumably via
 * bxe_free_fp_buffers) — confirm against the caller's error handling.
 */
static __noinline int
bxe_alloc_fp_buffers(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i, j, rc = 0;
    int ring_prod, cqe_ring_prod;
    int max_agg_queues;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

#if __FreeBSD_version >= 800000
        fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            BLOGE(sc, "buf_ring alloc fail for fp[%02d]\n", i);
            goto bxe_alloc_fp_buffers_error;
        }
#endif

        ring_prod = cqe_ring_prod = 0;
        fp->rx_bd_cons = 0;
        fp->rx_cq_cons = 0;

        /* allocate buffers for the RX BDs in RX BD chain */
        for (j = 0; j < sc->max_rx_bufs; j++) {
            rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod);
            if (rc != 0) {
                BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n",
                      i, rc);
                goto bxe_alloc_fp_buffers_error;
            }

            ring_prod = RX_BD_NEXT(ring_prod);
            cqe_ring_prod = RCQ_NEXT(cqe_ring_prod);
        }

        fp->rx_bd_prod = ring_prod;
        fp->rx_cq_prod = cqe_ring_prod;
        fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;

        /* TPA/SGE resources are only needed when LRO is enabled */
        if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
            max_agg_queues = MAX_AGG_QS(sc);

            fp->tpa_enable = TRUE;

            /* fill the TPA pool */
            for (j = 0; j < max_agg_queues; j++) {
                rc = bxe_alloc_rx_tpa_mbuf(fp, j);
                if (rc != 0) {
                    BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n",
                          i, j);
                    fp->tpa_enable = FALSE;
                    goto bxe_alloc_fp_buffers_error;
                }

                fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
            }

            if (fp->tpa_enable) {
                /* fill the RX SGE chain */
                ring_prod = 0;
                for (j = 0; j < RX_SGE_USABLE; j++) {
                    rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod);
                    if (rc != 0) {
                        BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n",
                              i, ring_prod);
                        fp->tpa_enable = FALSE;
                        ring_prod = 0;
                        goto bxe_alloc_fp_buffers_error;
                    }

                    ring_prod = RX_SGE_NEXT(ring_prod);
                }

                fp->rx_sge_prod = ring_prod;
            }
        }
    }

    return (0);

bxe_alloc_fp_buffers_error:

    /* unwind what was already allocated (current fastpath only) */
    bxe_free_rx_bd_chain(fp);
    bxe_free_tpa_pool(fp);
    bxe_free_sge_chain(fp);

    return (ENOBUFS);
}
| 6850 |
|
| 6851 |
static void |
| 6852 |
bxe_free_fw_stats_mem(struct bxe_softc *sc) |
| 6853 |
{ |
| 6854 |
bxe_dma_free(sc, &sc->fw_stats_dma); |
| 6855 |
|
| 6856 |
sc->fw_stats_num = 0; |
| 6857 |
|
| 6858 |
sc->fw_stats_req_size = 0; |
| 6859 |
sc->fw_stats_req = NULL; |
| 6860 |
sc->fw_stats_req_mapping = 0; |
| 6861 |
|
| 6862 |
sc->fw_stats_data_size = 0; |
| 6863 |
sc->fw_stats_data = NULL; |
| 6864 |
sc->fw_stats_data_mapping = 0; |
| 6865 |
} |
| 6866 |
|
| 6867 |
/*
 * Size and allocate one DMA buffer holding both the FW statistics
 * request (header + command groups) and the statistics data area, then
 * set up the shortcut pointers/physical addresses into it.
 *
 * Returns 0 on success, -1 on DMA allocation failure (state is reset
 * via bxe_free_fw_stats_mem before returning).
 */
static int
bxe_alloc_fw_stats_mem(struct bxe_softc *sc)
{
    uint8_t num_queue_stats;
    int num_groups;

    /* number of queues for statistics is number of eth queues */
    num_queue_stats = BXE_NUM_ETH_QUEUES(sc);

    /*
     * Total number of FW statistics requests =
     *   1 for port stats + 1 for PF stats + num of queues
     */
    sc->fw_stats_num = (2 + num_queue_stats);

    /*
     * Request is built from stats_query_header and an array of
     * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT
     * rules. The real number of requests is configured in the
     * stats_query_header.
     */
    num_groups =
        ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
         ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));

    BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n",
          sc->fw_stats_num, num_groups);

    sc->fw_stats_req_size =
        (sizeof(struct stats_query_header) +
         (num_groups * sizeof(struct stats_query_cmd_group)));

    /*
     * Data for statistics requests + stats_counter.
     * stats_counter holds per-STORM counters that are incremented when
     * STORM has finished with the current request. Memory for FCoE
     * offloaded statistics are counted anyway, even if they will not be sent.
     * VF stats are not accounted for here as the data of VF stats is stored
     * in memory allocated by the VF, not here.
     */
    sc->fw_stats_data_size =
        (sizeof(struct stats_counter) +
         sizeof(struct per_port_stats) +
         sizeof(struct per_pf_stats) +
         /* sizeof(struct fcoe_statistics_params) + */
         (sizeof(struct per_queue_stats) * num_queue_stats));

    /* one contiguous DMA buffer: request area followed by data area */
    if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
                      &sc->fw_stats_dma, "fw stats") != 0) {
        bxe_free_fw_stats_mem(sc);
        return (-1);
    }

    /* set up the shortcuts */

    sc->fw_stats_req =
        (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
    sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;

    sc->fw_stats_data =
        (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
                                     sc->fw_stats_req_size);
    sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
                                 sc->fw_stats_req_size);

    BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n",
          (uintmax_t)sc->fw_stats_req_mapping);

    BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n",
          (uintmax_t)sc->fw_stats_data_mapping);

    return (0);
}
| 6940 |
|
| 6941 |
/* |
| 6942 |
* Bits map: |
| 6943 |
* 0-7 - Engine0 load counter. |
| 6944 |
* 8-15 - Engine1 load counter. |
| 6945 |
* 16 - Engine0 RESET_IN_PROGRESS bit. |
| 6946 |
* 17 - Engine1 RESET_IN_PROGRESS bit. |
| 6947 |
* 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active |
| 6948 |
* function on the engine |
| 6949 |
* 19 - Engine1 ONE_IS_LOADED. |
| 6950 |
* 20 - Chip reset flow bit. When set none-leader must wait for both engines |
| 6951 |
* leader to complete (check for both RESET_IN_PROGRESS bits and not |
| 6952 |
* for just the one belonging to its engine). |
| 6953 |
*/ |
| 6954 |
/* layout of the shared recovery register described in the bitmap above */
#define BXE_RECOVERY_GLOB_REG     MISC_REG_GENERIC_POR_1
#define BXE_PATH0_LOAD_CNT_MASK   0x000000ff
#define BXE_PATH0_LOAD_CNT_SHIFT  0
#define BXE_PATH1_LOAD_CNT_MASK   0x0000ff00
#define BXE_PATH1_LOAD_CNT_SHIFT  8
#define BXE_PATH0_RST_IN_PROG_BIT 0x00010000
#define BXE_PATH1_RST_IN_PROG_BIT 0x00020000
#define BXE_GLOBAL_RESET_BIT      0x00040000
| 6962 |
|
| 6963 |
/* set the GLOBAL_RESET bit, should be run under rtnl lock */ |
| 6964 |
/*
 * Set the GLOBAL_RESET bit in the shared recovery register, should be
 * run under rtnl lock. Uses the RECOVERY_REG HW lock for the
 * read-modify-write across PFs.
 */
static void
bxe_set_reset_global(struct bxe_softc *sc)
{
    uint32_t val;
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
| 6973 |
|
| 6974 |
/* clear the GLOBAL_RESET bit, should be run under rtnl lock */ |
| 6975 |
/*
 * Clear the GLOBAL_RESET bit in the shared recovery register, should be
 * run under rtnl lock. Uses the RECOVERY_REG HW lock for the
 * read-modify-write across PFs.
 */
static void
bxe_clear_reset_global(struct bxe_softc *sc)
{
    uint32_t val;
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT));
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
| 6984 |
|
| 6985 |
/* checks the GLOBAL_RESET bit, should be run under rtnl lock */ |
| 6986 |
static uint8_t |
| 6987 |
bxe_reset_is_global(struct bxe_softc *sc) |
| 6988 |
{ |
| 6989 |
uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); |
| 6990 |
BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); |
| 6991 |
return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; |
| 6992 |
} |
| 6993 |
|
| 6994 |
/*
 * Clear this engine's RESET_IN_PROGRESS bit in the shared recovery
 * register (i.e. mark reset as done); should be run under rtnl lock.
 */
static void
bxe_set_reset_done(struct bxe_softc *sc)
{
    uint32_t val;
    /* pick the bit belonging to this engine (path 0 or path 1) */
    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
                                 BXE_PATH0_RST_IN_PROG_BIT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    /* Clear the bit */
    val &= ~bit;
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
| 7011 |
|
| 7012 |
/*
 * Set this engine's RESET_IN_PROGRESS bit in the shared recovery
 * register; should be run under rtnl lock.
 */
static void
bxe_set_reset_in_progress(struct bxe_softc *sc)
{
    uint32_t val;
    /* pick the bit belonging to this engine (path 0 or path 1) */
    uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT :
                                 BXE_PATH0_RST_IN_PROG_BIT;

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    /* Set the bit */
    val |= bit;
    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
| 7029 |
|
| 7030 |
/* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ |
| 7031 |
static uint8_t |
| 7032 |
bxe_reset_is_done(struct bxe_softc *sc, |
| 7033 |
int engine) |
| 7034 |
{ |
| 7035 |
uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); |
| 7036 |
uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : |
| 7037 |
BXE_PATH0_RST_IN_PROG_BIT; |
| 7038 |
|
| 7039 |
/* return false if bit is set */ |
| 7040 |
return (val & bit) ? FALSE : TRUE; |
| 7041 |
} |
| 7042 |
|
| 7043 |
/*
 * Get the load status for an engine; should be run under rtnl lock.
 * Returns non-zero if any PF on the given engine has its load-counter
 * bit set in the shared recovery register.
 */
static uint8_t
bxe_get_load_status(struct bxe_softc *sc,
                    int              engine)
{
    /* select the load-counter byte for the requested engine */
    uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK :
                             BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT :
                              BXE_PATH0_LOAD_CNT_SHIFT;
    uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);

    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);

    /* extract the per-PF load bitmap for this engine */
    val = ((val & mask) >> shift);

    BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val);

    return (val != 0);
}
| 7062 |
|
| 7063 |
/*
 * Set this PF's bit in the per-engine load counter of the shared
 * recovery register (marks the PF as loaded).
 */
/* XXX needs to be under rtnl lock */
static void
bxe_set_pf_load(struct bxe_softc *sc)
{
    uint32_t val;
    uint32_t val1;
    /* load-counter byte of this PF's engine */
    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                  BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
                                   BXE_PATH0_LOAD_CNT_SHIFT;

    /* HW lock serializes the read-modify-write against other PFs */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);

    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val);

    /* get the current counter value */
    val1 = ((val & mask) >> shift);

    /* set bit of this PF */
    val1 |= (1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
}
| 7096 |
|
| 7097 |
/*
 * Clear this PF's bit in the per-engine load counter of the shared
 * recovery register. Returns non-zero if other PFs on this engine are
 * still marked as loaded after the clear.
 */
/* XXX needs to be under rtnl lock */
static uint8_t
bxe_clear_pf_load(struct bxe_softc *sc)
{
    uint32_t val1, val;
    /* load-counter byte of this PF's engine */
    uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK :
                                  BXE_PATH0_LOAD_CNT_MASK;
    uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT :
                                   BXE_PATH0_LOAD_CNT_SHIFT;

    /* HW lock serializes the read-modify-write against other PFs */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    val = REG_RD(sc, BXE_RECOVERY_GLOB_REG);
    BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val);

    /* get the current counter value */
    val1 = (val & mask) >> shift;

    /* clear bit of that PF */
    val1 &= ~(1 << SC_ABS_FUNC(sc));

    /* clear the old value */
    val &= ~mask;

    /* set the new one */
    val |= ((val1 << shift) & mask);

    REG_WR(sc, BXE_RECOVERY_GLOB_REG, val);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
    return (val1 != 0);
}
| 7128 |
|
| 7129 |
/*
 * Send a load request to the MCP and analyze the response. On success
 * the MCP's response code is returned via *load_code. Returns 0 on
 * success, -1 if the MCP does not respond or refuses the load.
 */
static int
bxe_nic_load_request(struct bxe_softc *sc,
                     uint32_t         *load_code)
{
    /* init fw_seq from the driver mailbox header */
    sc->fw_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
         DRV_MSG_SEQ_NUMBER_MASK);

    BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);

    /* get the current FW pulse sequence */
    sc->fw_drv_pulse_wr_seq =
        (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) &
         DRV_PULSE_SEQ_MASK);

    BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n",
          sc->fw_drv_pulse_wr_seq);

    /* load request */
    (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                  DRV_MSG_CODE_LOAD_REQ_WITH_LFA);

    /* if the MCP fails to respond we must abort */
    if (!(*load_code)) {
        BLOGE(sc, "MCP response failure!\n");
        return (-1);
    }

    /* if MCP refused then must abort */
    if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
        BLOGE(sc, "MCP refused load request\n");
        return (-1);
    }

    return (0);
}
| 7167 |
|
| 7168 |
/*
 * Check whether another PF has already loaded FW to chip. In virtualized
 * environments a pf from another VM may have already initialized the device
 * including loading FW.
 *
 * Returns 0 when the loaded FW matches ours (or when we are the first
 * loader on the engine), -1 on version mismatch.
 */
static int
bxe_nic_load_analyze_req(struct bxe_softc *sc,
                         uint32_t         load_code)
{
    uint32_t my_fw, loaded_fw;

    /* is another pf loaded on this engine? */
    if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
        (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
        /* build my FW version dword */
        my_fw = (BCM_5710_FW_MAJOR_VERSION +
                 (BCM_5710_FW_MINOR_VERSION << 8 ) +
                 (BCM_5710_FW_REVISION_VERSION << 16) +
                 (BCM_5710_FW_ENGINEERING_VERSION << 24));

        /* read loaded FW from chip */
        loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
        BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n",
              loaded_fw, my_fw);

        /* abort nic load if version mismatch */
        if (my_fw != loaded_fw) {
            /* NOTE(review): message lacks a trailing '\n' unlike the
             * other BLOGE calls — confirm whether that is intentional */
            BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)",
                  loaded_fw, my_fw);
            return (-1);
        }
    }

    return (0);
}
| 7203 |
|
| 7204 |
/*
 * Mark this function as PMF (port management function) when the MCP
 * load response indicates COMMON/COMMON_CHIP/PORT; otherwise clear the
 * PMF flag. For COMMON_CHIP also zero the NC-SI OEM driver-version
 * field in shmem2 if present.
 */
static void
bxe_nic_load_pmf(struct bxe_softc *sc,
                 uint32_t         load_code)
{
    uint32_t ncsi_oem_data_addr;

    if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
        (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
        /*
         * Barrier here for ordering between the writing to sc->port.pmf here
         * and reading it from the periodic task.
         */
        sc->port.pmf = 1;
        mb();
    } else {
        sc->port.pmf = 0;
    }

    BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);

    /* XXX needed? */
    if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
        if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
            ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr);
            if (ncsi_oem_data_addr) {
                /* clear the reported driver version for NC-SI OEM data */
                REG_WR(sc,
                       (ncsi_oem_data_addr +
                        offsetof(struct glob_ncsi_oem_data, driver_version)),
                       0);
            }
        }
    }
}
| 7239 |
|
| 7240 |
static void |
| 7241 |
bxe_read_mf_cfg(struct bxe_softc *sc) |
| 7242 |
{ |
| 7243 |
int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); |
| 7244 |
int abs_func; |
| 7245 |
int vn; |
| 7246 |
|
| 7247 |
if (BXE_NOMCP(sc)) { |
| 7248 |
return; /* what should be the default bvalue in this case */ |
| 7249 |
} |
| 7250 |
|
| 7251 |
/* |
| 7252 |
* The formula for computing the absolute function number is... |
| 7253 |
* For 2 port configuration (4 functions per port): |
| 7254 |
* abs_func = 2 * vn + SC_PORT + SC_PATH |
| 7255 |
* For 4 port configuration (2 functions per port): |
| 7256 |
* abs_func = 4 * vn + 2 * SC_PORT + SC_PATH |
| 7257 |
*/ |
| 7258 |
for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { |
| 7259 |
abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); |
| 7260 |
if (abs_func >= E1H_FUNC_MAX) { |
| 7261 |
break; |
| 7262 |
} |
| 7263 |
sc->devinfo.mf_info.mf_config[vn] = |
| 7264 |
MFCFG_RD(sc, func_mf_config[abs_func].config); |
| 7265 |
} |
| 7266 |
|
| 7267 |
if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & |
| 7268 |
FUNC_MF_CFG_FUNC_DISABLED) { |
| 7269 |
BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); |
| 7270 |
sc->flags |= BXE_MF_FUNC_DIS; |
| 7271 |
} else { |
| 7272 |
BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); |
| 7273 |
sc->flags &= ~BXE_MF_FUNC_DIS; |
| 7274 |
} |
| 7275 |
} |
| 7276 |
|
| 7277 |
/* acquire split MCP access lock register */ |
| 7278 |
static int bxe_acquire_alr(struct bxe_softc *sc) |
| 7279 |
{ |
| 7280 |
uint32_t j, val; |
| 7281 |
|
| 7282 |
for (j = 0; j < 1000; j++) { |
| 7283 |
val = (1UL << 31); |
| 7284 |
REG_WR(sc, GRCBASE_MCP + 0x9c, val); |
| 7285 |
val = REG_RD(sc, GRCBASE_MCP + 0x9c); |
| 7286 |
if (val & (1L << 31)) |
| 7287 |
break; |
| 7288 |
|
| 7289 |
DELAY(5000); |
| 7290 |
} |
| 7291 |
|
| 7292 |
if (!(val & (1L << 31))) { |
| 7293 |
BLOGE(sc, "Cannot acquire MCP access lock register\n"); |
| 7294 |
return (-1); |
| 7295 |
} |
| 7296 |
|
| 7297 |
return (0); |
| 7298 |
} |
| 7299 |
|
| 7300 |
/* release split MCP access lock register */
static void bxe_release_alr(struct bxe_softc *sc)
{
    /* clearing the register (bit 31) drops our ownership of the ALR */
    REG_WR(sc, GRCBASE_MCP + 0x9c, 0);
}
| 7305 |
|
| 7306 |
static void |
| 7307 |
bxe_fan_failure(struct bxe_softc *sc) |
| 7308 |
{ |
| 7309 |
int port = SC_PORT(sc); |
| 7310 |
uint32_t ext_phy_config; |
| 7311 |
|
| 7312 |
/* mark the failure */ |
| 7313 |
ext_phy_config = |
| 7314 |
SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); |
| 7315 |
|
| 7316 |
ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; |
| 7317 |
ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; |
| 7318 |
SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, |
| 7319 |
ext_phy_config); |
| 7320 |
|
| 7321 |
/* log the failure */ |
| 7322 |
BLOGW(sc, "Fan Failure has caused the driver to shutdown " |
| 7323 |
"the card to prevent permanent damage. " |
| 7324 |
"Please contact OEM Support for assistance\n"); |
| 7325 |
|
| 7326 |
/* XXX */ |
| 7327 |
#if 1 |
| 7328 |
bxe_panic(sc, ("Schedule task to handle fan failure\n")); |
| 7329 |
#else |
| 7330 |
/* |
| 7331 |
* Schedule device reset (unload) |
| 7332 |
* This is due to some boards consuming sufficient power when driver is |
| 7333 |
* up to overheat if fan fails. |
| 7334 |
*/ |
| 7335 |
bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); |
| 7336 |
schedule_delayed_work(&sc->sp_rtnl_task, 0); |
| 7337 |
#endif |
| 7338 |
} |
| 7339 |
|
| 7340 |
/* this function is called upon a link interrupt */ |
| 7341 |
static void |
| 7342 |
bxe_link_attn(struct bxe_softc *sc) |
| 7343 |
{ |
| 7344 |
uint32_t pause_enabled = 0; |
| 7345 |
struct host_port_stats *pstats; |
| 7346 |
int cmng_fns; |
| 7347 |
|
| 7348 |
/* Make sure that we are synced with the current statistics */ |
| 7349 |
bxe_stats_handle(sc, STATS_EVENT_STOP); |
| 7350 |
|
| 7351 |
elink_link_update(&sc->link_params, &sc->link_vars); |
| 7352 |
|
| 7353 |
if (sc->link_vars.link_up) { |
| 7354 |
|
| 7355 |
/* dropless flow control */ |
| 7356 |
if (!CHIP_IS_E1(sc) && sc->dropless_fc) { |
| 7357 |
pause_enabled = 0; |
| 7358 |
|
| 7359 |
if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { |
| 7360 |
pause_enabled = 1; |
| 7361 |
} |
| 7362 |
|
| 7363 |
REG_WR(sc, |
| 7364 |
(BAR_USTRORM_INTMEM + |
| 7365 |
USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), |
| 7366 |
pause_enabled); |
| 7367 |
} |
| 7368 |
|
| 7369 |
if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { |
| 7370 |
pstats = BXE_SP(sc, port_stats); |
| 7371 |
/* reset old mac stats */ |
| 7372 |
memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); |
| 7373 |
} |
| 7374 |
|
| 7375 |
if (sc->state == BXE_STATE_OPEN) { |
| 7376 |
bxe_stats_handle(sc, STATS_EVENT_LINK_UP); |
| 7377 |
} |
| 7378 |
} |
| 7379 |
|
| 7380 |
if (sc->link_vars.link_up && sc->link_vars.line_speed) { |
| 7381 |
cmng_fns = bxe_get_cmng_fns_mode(sc); |
| 7382 |
|
| 7383 |
if (cmng_fns != CMNG_FNS_NONE) { |
| 7384 |
bxe_cmng_fns_init(sc, FALSE, cmng_fns); |
| 7385 |
storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); |
| 7386 |
} else { |
| 7387 |
/* rate shaping and fairness are disabled */ |
| 7388 |
BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); |
| 7389 |
} |
| 7390 |
} |
| 7391 |
|
| 7392 |
bxe_link_report_locked(sc); |
| 7393 |
|
| 7394 |
if (IS_MF(sc)) { |
| 7395 |
; // XXX bxe_link_sync_notify(sc); |
| 7396 |
} |
| 7397 |
} |
| 7398 |
|
| 7399 |
/*
 * Handle newly asserted attention bits.
 *
 * Sequence (order is hardware-significant, do not reorder):
 *   1. mask the asserted bits in the AEU (under the HW lock),
 *   2. record them in sc->attn_state,
 *   3. service hard-wired attentions (NIG/link, GPIOs, general attns),
 *   4. ack the bits toward HC/IGU,
 *   5. for NIG attentions, wait for the IGU ack before restoring the
 *      NIG interrupt mask.
 */
static void
bxe_attn_int_asserted(struct bxe_softc *sc,
                      uint32_t asserted)
{
    int port = SC_PORT(sc);
    /* per-port AEU mask and NIG interrupt-mask register addresses */
    uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
    uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
                                        NIG_REG_MASK_INTERRUPT_PORT0;
    uint32_t aeu_mask;
    uint32_t nig_mask = 0;
    uint32_t reg_addr;
    uint32_t igu_acked;
    uint32_t cnt;

    /* a bit both recorded and newly asserted indicates an IGU problem */
    if (sc->attn_state & asserted) {
        BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted);
    }

    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, aeu_addr);

    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n",
          aeu_mask, asserted);

    /* mask the newly asserted bits (only the low 10 are maskable here) */
    aeu_mask &= ~(asserted & 0x3ff);

    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);

    REG_WR(sc, aeu_addr, aeu_mask);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
    sc->attn_state |= asserted;
    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);

    if (asserted & ATTN_HARD_WIRED_MASK) {
        if (asserted & ATTN_NIG_FOR_FUNC) {

            /* PHY lock held across link handling; released further below */
            BXE_PHY_LOCK(sc);

            /* save nig interrupt mask */
            nig_mask = REG_RD(sc, nig_int_mask_addr);

            /* If nig_mask is not set, no need to call the update function */
            if (nig_mask) {
                REG_WR(sc, nig_int_mask_addr, 0);

                bxe_link_attn(sc);
            }

            /* handle unicore attn? */
        }

        if (asserted & ATTN_SW_TIMER_4_FUNC) {
            BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n");
        }

        if (asserted & GPIO_2_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n");
        }

        if (asserted & GPIO_3_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n");
        }

        if (asserted & GPIO_4_FUNC) {
            BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n");
        }

        /* general attentions 1-3 belong to port 0, 4-6 to port 1 */
        if (port == 0) {
            if (asserted & ATTN_GENERAL_ATTN_1) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_2) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_3) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
            }
        } else {
            if (asserted & ATTN_GENERAL_ATTN_4) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_5) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
            }
            if (asserted & ATTN_GENERAL_ATTN_6) {
                BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n");
                REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
            }
        }
    } /* hardwired */

    /* ack the attention bits toward the interrupt controller */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
    }

    BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n",
          asserted,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, asserted);

    /* now set back the mask */
    if (asserted & ATTN_NIG_FOR_FUNC) {
        /*
         * Verify that IGU ack through BAR was written before restoring
         * NIG mask. This loop should exit after 2-3 iterations max.
         */
        if (sc->devinfo.int_block != INT_BLOCK_HC) {
            cnt = 0;

            do {
                igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS);
            } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
                     (++cnt < MAX_IGU_ATTN_ACK_TO));

            if (!igu_acked) {
                BLOGE(sc, "Failed to verify IGU ack on time\n");
            }

            mb();
        }

        REG_WR(sc, nig_int_mask_addr, nig_mask);

        /* balances the BXE_PHY_LOCK taken in the hardwired section above */
        BXE_PHY_UNLOCK(sc);
    }
}
| 7537 |
|
| 7538 |
/*
 * Append a block name to the parity-error report line; a ", " separator
 * is emitted before every entry except the first (idx == 0).
 */
static void
bxe_print_next_block(struct bxe_softc *sc,
                     int idx,
                     const char *blk)
{
    BLOGI(sc, "%s%s", idx ? ", " : "", blk);
}
| 7545 |
|
| 7546 |
static int |
| 7547 |
bxe_check_blocks_with_parity0(struct bxe_softc *sc, |
| 7548 |
uint32_t sig, |
| 7549 |
int par_num, |
| 7550 |
uint8_t print) |
| 7551 |
{ |
| 7552 |
uint32_t cur_bit = 0; |
| 7553 |
int i = 0; |
| 7554 |
|
| 7555 |
for (i = 0; sig; i++) { |
| 7556 |
cur_bit = ((uint32_t)0x1 << i); |
| 7557 |
if (sig & cur_bit) { |
| 7558 |
switch (cur_bit) { |
| 7559 |
case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: |
| 7560 |
if (print) |
| 7561 |
bxe_print_next_block(sc, par_num++, "BRB"); |
| 7562 |
break; |
| 7563 |
case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: |
| 7564 |
if (print) |
| 7565 |
bxe_print_next_block(sc, par_num++, "PARSER"); |
| 7566 |
break; |
| 7567 |
case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: |
| 7568 |
if (print) |
| 7569 |
bxe_print_next_block(sc, par_num++, "TSDM"); |
| 7570 |
break; |
| 7571 |
case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: |
| 7572 |
if (print) |
| 7573 |
bxe_print_next_block(sc, par_num++, "SEARCHER"); |
| 7574 |
break; |
| 7575 |
case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: |
| 7576 |
if (print) |
| 7577 |
bxe_print_next_block(sc, par_num++, "TCM"); |
| 7578 |
break; |
| 7579 |
case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: |
| 7580 |
if (print) |
| 7581 |
bxe_print_next_block(sc, par_num++, "TSEMI"); |
| 7582 |
break; |
| 7583 |
case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: |
| 7584 |
if (print) |
| 7585 |
bxe_print_next_block(sc, par_num++, "XPB"); |
| 7586 |
break; |
| 7587 |
} |
| 7588 |
|
| 7589 |
/* Clear the bit */ |
| 7590 |
sig &= ~cur_bit; |
| 7591 |
} |
| 7592 |
} |
| 7593 |
|
| 7594 |
return (par_num); |
| 7595 |
} |
| 7596 |
|
| 7597 |
static int |
| 7598 |
bxe_check_blocks_with_parity1(struct bxe_softc *sc, |
| 7599 |
uint32_t sig, |
| 7600 |
int par_num, |
| 7601 |
uint8_t *global, |
| 7602 |
uint8_t print) |
| 7603 |
{ |
| 7604 |
int i = 0; |
| 7605 |
uint32_t cur_bit = 0; |
| 7606 |
for (i = 0; sig; i++) { |
| 7607 |
cur_bit = ((uint32_t)0x1 << i); |
| 7608 |
if (sig & cur_bit) { |
| 7609 |
switch (cur_bit) { |
| 7610 |
case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: |
| 7611 |
if (print) |
| 7612 |
bxe_print_next_block(sc, par_num++, "PBF"); |
| 7613 |
break; |
| 7614 |
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: |
| 7615 |
if (print) |
| 7616 |
bxe_print_next_block(sc, par_num++, "QM"); |
| 7617 |
break; |
| 7618 |
case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: |
| 7619 |
if (print) |
| 7620 |
bxe_print_next_block(sc, par_num++, "TM"); |
| 7621 |
break; |
| 7622 |
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: |
| 7623 |
if (print) |
| 7624 |
bxe_print_next_block(sc, par_num++, "XSDM"); |
| 7625 |
break; |
| 7626 |
case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: |
| 7627 |
if (print) |
| 7628 |
bxe_print_next_block(sc, par_num++, "XCM"); |
| 7629 |
break; |
| 7630 |
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: |
| 7631 |
if (print) |
| 7632 |
bxe_print_next_block(sc, par_num++, "XSEMI"); |
| 7633 |
break; |
| 7634 |
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: |
| 7635 |
if (print) |
| 7636 |
bxe_print_next_block(sc, par_num++, "DOORBELLQ"); |
| 7637 |
break; |
| 7638 |
case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: |
| 7639 |
if (print) |
| 7640 |
bxe_print_next_block(sc, par_num++, "NIG"); |
| 7641 |
break; |
| 7642 |
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: |
| 7643 |
if (print) |
| 7644 |
bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); |
| 7645 |
*global = TRUE; |
| 7646 |
break; |
| 7647 |
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: |
| 7648 |
if (print) |
| 7649 |
bxe_print_next_block(sc, par_num++, "DEBUG"); |
| 7650 |
break; |
| 7651 |
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: |
| 7652 |
if (print) |
| 7653 |
bxe_print_next_block(sc, par_num++, "USDM"); |
| 7654 |
break; |
| 7655 |
case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: |
| 7656 |
if (print) |
| 7657 |
bxe_print_next_block(sc, par_num++, "UCM"); |
| 7658 |
break; |
| 7659 |
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: |
| 7660 |
if (print) |
| 7661 |
bxe_print_next_block(sc, par_num++, "USEMI"); |
| 7662 |
break; |
| 7663 |
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: |
| 7664 |
if (print) |
| 7665 |
bxe_print_next_block(sc, par_num++, "UPB"); |
| 7666 |
break; |
| 7667 |
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: |
| 7668 |
if (print) |
| 7669 |
bxe_print_next_block(sc, par_num++, "CSDM"); |
| 7670 |
break; |
| 7671 |
case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: |
| 7672 |
if (print) |
| 7673 |
bxe_print_next_block(sc, par_num++, "CCM"); |
| 7674 |
break; |
| 7675 |
} |
| 7676 |
|
| 7677 |
/* Clear the bit */ |
| 7678 |
sig &= ~cur_bit; |
| 7679 |
} |
| 7680 |
} |
| 7681 |
|
| 7682 |
return (par_num); |
| 7683 |
} |
| 7684 |
|
| 7685 |
static int |
| 7686 |
bxe_check_blocks_with_parity2(struct bxe_softc *sc, |
| 7687 |
uint32_t sig, |
| 7688 |
int par_num, |
| 7689 |
uint8_t print) |
| 7690 |
{ |
| 7691 |
uint32_t cur_bit = 0; |
| 7692 |
int i = 0; |
| 7693 |
|
| 7694 |
for (i = 0; sig; i++) { |
| 7695 |
cur_bit = ((uint32_t)0x1 << i); |
| 7696 |
if (sig & cur_bit) { |
| 7697 |
switch (cur_bit) { |
| 7698 |
case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: |
| 7699 |
if (print) |
| 7700 |
bxe_print_next_block(sc, par_num++, "CSEMI"); |
| 7701 |
break; |
| 7702 |
case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: |
| 7703 |
if (print) |
| 7704 |
bxe_print_next_block(sc, par_num++, "PXP"); |
| 7705 |
break; |
| 7706 |
case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: |
| 7707 |
if (print) |
| 7708 |
bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); |
| 7709 |
break; |
| 7710 |
case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: |
| 7711 |
if (print) |
| 7712 |
bxe_print_next_block(sc, par_num++, "CFC"); |
| 7713 |
break; |
| 7714 |
case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: |
| 7715 |
if (print) |
| 7716 |
bxe_print_next_block(sc, par_num++, "CDU"); |
| 7717 |
break; |
| 7718 |
case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: |
| 7719 |
if (print) |
| 7720 |
bxe_print_next_block(sc, par_num++, "DMAE"); |
| 7721 |
break; |
| 7722 |
case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: |
| 7723 |
if (print) |
| 7724 |
bxe_print_next_block(sc, par_num++, "IGU"); |
| 7725 |
break; |
| 7726 |
case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: |
| 7727 |
if (print) |
| 7728 |
bxe_print_next_block(sc, par_num++, "MISC"); |
| 7729 |
break; |
| 7730 |
} |
| 7731 |
|
| 7732 |
/* Clear the bit */ |
| 7733 |
sig &= ~cur_bit; |
| 7734 |
} |
| 7735 |
} |
| 7736 |
|
| 7737 |
return (par_num); |
| 7738 |
} |
| 7739 |
|
| 7740 |
static int |
| 7741 |
bxe_check_blocks_with_parity3(struct bxe_softc *sc, |
| 7742 |
uint32_t sig, |
| 7743 |
int par_num, |
| 7744 |
uint8_t *global, |
| 7745 |
uint8_t print) |
| 7746 |
{ |
| 7747 |
uint32_t cur_bit = 0; |
| 7748 |
int i = 0; |
| 7749 |
|
| 7750 |
for (i = 0; sig; i++) { |
| 7751 |
cur_bit = ((uint32_t)0x1 << i); |
| 7752 |
if (sig & cur_bit) { |
| 7753 |
switch (cur_bit) { |
| 7754 |
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: |
| 7755 |
if (print) |
| 7756 |
bxe_print_next_block(sc, par_num++, "MCP ROM"); |
| 7757 |
*global = TRUE; |
| 7758 |
break; |
| 7759 |
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: |
| 7760 |
if (print) |
| 7761 |
bxe_print_next_block(sc, par_num++, |
| 7762 |
"MCP UMP RX"); |
| 7763 |
*global = TRUE; |
| 7764 |
break; |
| 7765 |
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: |
| 7766 |
if (print) |
| 7767 |
bxe_print_next_block(sc, par_num++, |
| 7768 |
"MCP UMP TX"); |
| 7769 |
*global = TRUE; |
| 7770 |
break; |
| 7771 |
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: |
| 7772 |
if (print) |
| 7773 |
bxe_print_next_block(sc, par_num++, |
| 7774 |
"MCP SCPAD"); |
| 7775 |
*global = TRUE; |
| 7776 |
break; |
| 7777 |
} |
| 7778 |
|
| 7779 |
/* Clear the bit */ |
| 7780 |
sig &= ~cur_bit; |
| 7781 |
} |
| 7782 |
} |
| 7783 |
|
| 7784 |
return (par_num); |
| 7785 |
} |
| 7786 |
|
| 7787 |
static int |
| 7788 |
bxe_check_blocks_with_parity4(struct bxe_softc *sc, |
| 7789 |
uint32_t sig, |
| 7790 |
int par_num, |
| 7791 |
uint8_t print) |
| 7792 |
{ |
| 7793 |
uint32_t cur_bit = 0; |
| 7794 |
int i = 0; |
| 7795 |
|
| 7796 |
for (i = 0; sig; i++) { |
| 7797 |
cur_bit = ((uint32_t)0x1 << i); |
| 7798 |
if (sig & cur_bit) { |
| 7799 |
switch (cur_bit) { |
| 7800 |
case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: |
| 7801 |
if (print) |
| 7802 |
bxe_print_next_block(sc, par_num++, "PGLUE_B"); |
| 7803 |
break; |
| 7804 |
case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: |
| 7805 |
if (print) |
| 7806 |
bxe_print_next_block(sc, par_num++, "ATC"); |
| 7807 |
break; |
| 7808 |
} |
| 7809 |
|
| 7810 |
/* Clear the bit */ |
| 7811 |
sig &= ~cur_bit; |
| 7812 |
} |
| 7813 |
} |
| 7814 |
|
| 7815 |
return (par_num); |
| 7816 |
} |
| 7817 |
|
| 7818 |
static uint8_t |
| 7819 |
bxe_parity_attn(struct bxe_softc *sc, |
| 7820 |
uint8_t *global, |
| 7821 |
uint8_t print, |
| 7822 |
uint32_t *sig) |
| 7823 |
{ |
| 7824 |
int par_num = 0; |
| 7825 |
|
| 7826 |
if ((sig[0] & HW_PRTY_ASSERT_SET_0) || |
| 7827 |
(sig[1] & HW_PRTY_ASSERT_SET_1) || |
| 7828 |
(sig[2] & HW_PRTY_ASSERT_SET_2) || |
| 7829 |
(sig[3] & HW_PRTY_ASSERT_SET_3) || |
| 7830 |
(sig[4] & HW_PRTY_ASSERT_SET_4)) { |
| 7831 |
BLOGE(sc, "Parity error: HW block parity attention:\n" |
| 7832 |
"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", |
| 7833 |
(uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), |
| 7834 |
(uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), |
| 7835 |
(uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), |
| 7836 |
(uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), |
| 7837 |
(uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); |
| 7838 |
|
| 7839 |
if (print) |
| 7840 |
BLOGI(sc, "Parity errors detected in blocks: "); |
| 7841 |
|
| 7842 |
par_num = |
| 7843 |
bxe_check_blocks_with_parity0(sc, sig[0] & |
| 7844 |
HW_PRTY_ASSERT_SET_0, |
| 7845 |
par_num, print); |
| 7846 |
par_num = |
| 7847 |
bxe_check_blocks_with_parity1(sc, sig[1] & |
| 7848 |
HW_PRTY_ASSERT_SET_1, |
| 7849 |
par_num, global, print); |
| 7850 |
par_num = |
| 7851 |
bxe_check_blocks_with_parity2(sc, sig[2] & |
| 7852 |
HW_PRTY_ASSERT_SET_2, |
| 7853 |
par_num, print); |
| 7854 |
par_num = |
| 7855 |
bxe_check_blocks_with_parity3(sc, sig[3] & |
| 7856 |
HW_PRTY_ASSERT_SET_3, |
| 7857 |
par_num, global, print); |
| 7858 |
par_num = |
| 7859 |
bxe_check_blocks_with_parity4(sc, sig[4] & |
| 7860 |
HW_PRTY_ASSERT_SET_4, |
| 7861 |
par_num, print); |
| 7862 |
|
| 7863 |
if (print) |
| 7864 |
BLOGI(sc, "\n"); |
| 7865 |
|
| 7866 |
return (TRUE); |
| 7867 |
} |
| 7868 |
|
| 7869 |
return (FALSE); |
| 7870 |
} |
| 7871 |
|
| 7872 |
static uint8_t |
| 7873 |
bxe_chk_parity_attn(struct bxe_softc *sc, |
| 7874 |
uint8_t *global, |
| 7875 |
uint8_t print) |
| 7876 |
{ |
| 7877 |
struct attn_route attn = { {0} }; |
| 7878 |
int port = SC_PORT(sc); |
| 7879 |
|
| 7880 |
attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); |
| 7881 |
attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); |
| 7882 |
attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); |
| 7883 |
attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); |
| 7884 |
|
| 7885 |
if (!CHIP_IS_E1x(sc)) |
| 7886 |
attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); |
| 7887 |
|
| 7888 |
return (bxe_parity_attn(sc, global, print, attn.sig)); |
| 7889 |
} |
| 7890 |
|
| 7891 |
/*
 * Decode and log the set-4 attention causes: PGLUE_B and ATC hardware
 * interrupts (read-and-clear via the *_INT_STS_CLR registers) and their
 * fatal parity bits.  Purely diagnostic — no recovery is attempted here.
 */
static void
bxe_attn_int_deasserted4(struct bxe_softc *sc,
                         uint32_t attn)
{
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
        /* reading the CLR register also acknowledges the interrupt */
        val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
        BLOGE(sc, "PGLUE hw attention 0x%08x\n", val);
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
        if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
            BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
    }

    if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
        /* reading the CLR register also acknowledges the interrupt */
        val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
        BLOGE(sc, "ATC hw attention 0x%08x\n", val);
        if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
        if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
            BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
    }

    /* parity on these blocks is fatal; flag it loudly */
    if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
        BLOGE(sc, "FATAL parity attention set4 0x%08x\n",
              (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
                                 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
    }
}
| 7944 |
|
| 7945 |
static void |
| 7946 |
bxe_e1h_disable(struct bxe_softc *sc) |
| 7947 |
{ |
| 7948 |
int port = SC_PORT(sc); |
| 7949 |
|
| 7950 |
bxe_tx_disable(sc); |
| 7951 |
|
| 7952 |
REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); |
| 7953 |
} |
| 7954 |
|
| 7955 |
static void |
| 7956 |
bxe_e1h_enable(struct bxe_softc *sc) |
| 7957 |
{ |
| 7958 |
int port = SC_PORT(sc); |
| 7959 |
|
| 7960 |
REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); |
| 7961 |
|
| 7962 |
// XXX bxe_tx_enable(sc); |
| 7963 |
} |
| 7964 |
|
| 7965 |
/* |
| 7966 |
* called due to MCP event (on pmf): |
| 7967 |
* reread new bandwidth configuration |
| 7968 |
* configure FW |
| 7969 |
* notify others function about the change |
| 7970 |
*/ |
| 7971 |
static void |
| 7972 |
bxe_config_mf_bw(struct bxe_softc *sc) |
| 7973 |
{ |
| 7974 |
if (sc->link_vars.link_up) { |
| 7975 |
bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); |
| 7976 |
// XXX bxe_link_sync_notify(sc); |
| 7977 |
} |
| 7978 |
|
| 7979 |
storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); |
| 7980 |
} |
| 7981 |
|
| 7982 |
/*
 * Apply a new multi-function bandwidth configuration, then acknowledge
 * the request back to the MCP.
 */
static void
bxe_set_mf_bw(struct bxe_softc *sc)
{
    bxe_config_mf_bw(sc);
    /* ack must follow the reconfiguration */
    bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
| 7988 |
|
| 7989 |
/*
 * Acknowledge an EEE (Energy Efficient Ethernet) / LLDP result event
 * from the MCP; no local state changes are needed.
 */
static void
bxe_handle_eee_event(struct bxe_softc *sc)
{
    BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
    bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
}
| 7995 |
|
| 7996 |
#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 |
| 7997 |
|
| 7998 |
static void |
| 7999 |
bxe_drv_info_ether_stat(struct bxe_softc *sc) |
| 8000 |
{ |
| 8001 |
struct eth_stats_info *ether_stat = |
| 8002 |
&sc->sp->drv_info_to_mcp.ether_stat; |
| 8003 |
|
| 8004 |
strlcpy(ether_stat->version, BXE_DRIVER_VERSION, |
| 8005 |
ETH_STAT_INFO_VERSION_LEN); |
| 8006 |
|
| 8007 |
/* XXX (+ MAC_PAD) taken from other driver... verify this is right */ |
| 8008 |
sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, |
| 8009 |
DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, |
| 8010 |
ether_stat->mac_local + MAC_PAD, |
| 8011 |
MAC_PAD, ETH_ALEN); |
| 8012 |
|
| 8013 |
ether_stat->mtu_size = sc->mtu; |
| 8014 |
|
| 8015 |
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; |
| 8016 |
if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { |
| 8017 |
ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; |
| 8018 |
} |
| 8019 |
|
| 8020 |
// XXX ether_stat->feature_flags |= ???; |
| 8021 |
|
| 8022 |
ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; |
| 8023 |
|
| 8024 |
ether_stat->txq_size = sc->tx_ring_size; |
| 8025 |
ether_stat->rxq_size = sc->rx_ring_size; |
| 8026 |
} |
| 8027 |
|
| 8028 |
static void |
| 8029 |
bxe_handle_drv_info_req(struct bxe_softc *sc) |
| 8030 |
{ |
| 8031 |
enum drv_info_opcode op_code; |
| 8032 |
uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); |
| 8033 |
|
| 8034 |
/* if drv_info version supported by MFW doesn't match - send NACK */ |
| 8035 |
if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { |
| 8036 |
bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); |
| 8037 |
return; |
| 8038 |
} |
| 8039 |
|
| 8040 |
op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> |
| 8041 |
DRV_INFO_CONTROL_OP_CODE_SHIFT); |
| 8042 |
|
| 8043 |
memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); |
| 8044 |
|
| 8045 |
switch (op_code) { |
| 8046 |
case ETH_STATS_OPCODE: |
| 8047 |
bxe_drv_info_ether_stat(sc); |
| 8048 |
break; |
| 8049 |
case FCOE_STATS_OPCODE: |
| 8050 |
case ISCSI_STATS_OPCODE: |
| 8051 |
default: |
| 8052 |
/* if op code isn't supported - send NACK */ |
| 8053 |
bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); |
| 8054 |
return; |
| 8055 |
} |
| 8056 |
|
| 8057 |
/* |
| 8058 |
* If we got drv_info attn from MFW then these fields are defined in |
| 8059 |
* shmem2 for sure |
| 8060 |
*/ |
| 8061 |
SHMEM2_WR(sc, drv_info_host_addr_lo, |
| 8062 |
U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); |
| 8063 |
SHMEM2_WR(sc, drv_info_host_addr_hi, |
| 8064 |
U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); |
| 8065 |
|
| 8066 |
bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); |
| 8067 |
} |
| 8068 |
|
| 8069 |
static void |
| 8070 |
bxe_dcc_event(struct bxe_softc *sc, |
| 8071 |
uint32_t dcc_event) |
| 8072 |
{ |
| 8073 |
BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); |
| 8074 |
|
| 8075 |
if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { |
| 8076 |
/* |
| 8077 |
* This is the only place besides the function initialization |
| 8078 |
* where the sc->flags can change so it is done without any |
| 8079 |
* locks |
| 8080 |
*/ |
| 8081 |
if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { |
| 8082 |
BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); |
| 8083 |
sc->flags |= BXE_MF_FUNC_DIS; |
| 8084 |
bxe_e1h_disable(sc); |
| 8085 |
} else { |
| 8086 |
BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); |
| 8087 |
sc->flags &= ~BXE_MF_FUNC_DIS; |
| 8088 |
bxe_e1h_enable(sc); |
| 8089 |
} |
| 8090 |
dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; |
| 8091 |
} |
| 8092 |
|
| 8093 |
if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { |
| 8094 |
bxe_config_mf_bw(sc); |
| 8095 |
dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; |
| 8096 |
} |
| 8097 |
|
| 8098 |
/* Report results to MCP */ |
| 8099 |
if (dcc_event) |
| 8100 |
bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); |
| 8101 |
else |
| 8102 |
bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); |
| 8103 |
} |
| 8104 |
|
| 8105 |
static void |
| 8106 |
bxe_pmf_update(struct bxe_softc *sc) |
| 8107 |
{ |
| 8108 |
int port = SC_PORT(sc); |
| 8109 |
uint32_t val; |
| 8110 |
|
| 8111 |
sc->port.pmf = 1; |
| 8112 |
BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); |
| 8113 |
|
| 8114 |
/* |
| 8115 |
* We need the mb() to ensure the ordering between the writing to |
| 8116 |
* sc->port.pmf here and reading it from the bxe_periodic_task(). |
| 8117 |
*/ |
| 8118 |
mb(); |
| 8119 |
|
| 8120 |
/* queue a periodic task */ |
| 8121 |
// XXX schedule task... |
| 8122 |
|
| 8123 |
// XXX bxe_dcbx_pmf_update(sc); |
| 8124 |
|
| 8125 |
/* enable nig attention */ |
| 8126 |
val = (0xff0f | (1 << (SC_VN(sc) + 4))); |
| 8127 |
if (sc->devinfo.int_block == INT_BLOCK_HC) { |
| 8128 |
REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); |
| 8129 |
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); |
| 8130 |
} else if (!CHIP_IS_E1x(sc)) { |
| 8131 |
REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); |
| 8132 |
REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); |
| 8133 |
} |
| 8134 |
|
| 8135 |
bxe_stats_handle(sc, STATS_EVENT_PMF); |
| 8136 |
} |
| 8137 |
|
| 8138 |
static int |
| 8139 |
bxe_mc_assert(struct bxe_softc *sc) |
| 8140 |
{ |
| 8141 |
char last_idx; |
| 8142 |
int i, rc = 0; |
| 8143 |
uint32_t row0, row1, row2, row3; |
| 8144 |
|
| 8145 |
/* XSTORM */ |
| 8146 |
last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); |
| 8147 |
if (last_idx) |
| 8148 |
BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); |
| 8149 |
|
| 8150 |
/* print the asserts */ |
| 8151 |
for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { |
| 8152 |
|
| 8153 |
row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); |
| 8154 |
row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); |
| 8155 |
row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); |
| 8156 |
row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); |
| 8157 |
|
| 8158 |
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
| 8159 |
BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 8160 |
i, row3, row2, row1, row0); |
| 8161 |
rc++; |
| 8162 |
} else { |
| 8163 |
break; |
| 8164 |
} |
| 8165 |
} |
| 8166 |
|
| 8167 |
/* TSTORM */ |
| 8168 |
last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); |
| 8169 |
if (last_idx) { |
| 8170 |
BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); |
| 8171 |
} |
| 8172 |
|
| 8173 |
/* print the asserts */ |
| 8174 |
for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { |
| 8175 |
|
| 8176 |
row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); |
| 8177 |
row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); |
| 8178 |
row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); |
| 8179 |
row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); |
| 8180 |
|
| 8181 |
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
| 8182 |
BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 8183 |
i, row3, row2, row1, row0); |
| 8184 |
rc++; |
| 8185 |
} else { |
| 8186 |
break; |
| 8187 |
} |
| 8188 |
} |
| 8189 |
|
| 8190 |
/* CSTORM */ |
| 8191 |
last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); |
| 8192 |
if (last_idx) { |
| 8193 |
BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); |
| 8194 |
} |
| 8195 |
|
| 8196 |
/* print the asserts */ |
| 8197 |
for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { |
| 8198 |
|
| 8199 |
row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); |
| 8200 |
row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); |
| 8201 |
row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); |
| 8202 |
row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); |
| 8203 |
|
| 8204 |
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
| 8205 |
BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 8206 |
i, row3, row2, row1, row0); |
| 8207 |
rc++; |
| 8208 |
} else { |
| 8209 |
break; |
| 8210 |
} |
| 8211 |
} |
| 8212 |
|
| 8213 |
/* USTORM */ |
| 8214 |
last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); |
| 8215 |
if (last_idx) { |
| 8216 |
BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); |
| 8217 |
} |
| 8218 |
|
| 8219 |
/* print the asserts */ |
| 8220 |
for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { |
| 8221 |
|
| 8222 |
row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); |
| 8223 |
row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); |
| 8224 |
row2 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); |
| 8225 |
row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); |
| 8226 |
|
| 8227 |
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
| 8228 |
BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 8229 |
i, row3, row2, row1, row0); |
| 8230 |
rc++; |
| 8231 |
} else { |
| 8232 |
break; |
| 8233 |
} |
| 8234 |
} |
| 8235 |
|
| 8236 |
return (rc); |
| 8237 |
} |
| 8238 |
|
| 8239 |
/*
 * Handle deasserted group-3 attentions: MCP-driven general attentions
 * (link/PMF/config events signalled through the shmem drv_status word),
 * storm firmware (MC) asserts, MCP asserts, and latched GRC attentions.
 */
static void
bxe_attn_int_deasserted3(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int func = SC_FUNC(sc);
    uint32_t val;

    if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

        if (attn & BXE_PMF_LINK_ASSERT(sc)) {

            /* ack the general attention before handling the event */
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
            /* refresh the MF configuration and fetch drv_status from shmem */
            bxe_read_mf_cfg(sc);
            sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
                MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
            val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status);

            /* dispatch each event class flagged in drv_status */
            if (val & DRV_STATUS_DCC_EVENT_MASK)
                bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK));

            if (val & DRV_STATUS_SET_MF_BW)
                bxe_set_mf_bw(sc);

            if (val & DRV_STATUS_DRV_INFO_REQ)
                bxe_handle_drv_info_req(sc);

#if 0
            if (val & DRV_STATUS_VF_DISABLED)
                bxe_vf_handle_flr_event(sc);
#endif

            /* become the PMF if the MCP nominated this function */
            if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
                bxe_pmf_update(sc);

#if 0
            if (sc->port.pmf &&
                (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
                (sc->dcbx_enabled > 0))
                /* start dcbx state machine */
                bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED);
#endif

#if 0
            if (val & DRV_STATUS_AFEX_EVENT_MASK)
                bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK);
#endif

            if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
                bxe_handle_eee_event(sc);

            /* consume a pending link event; flag is cleared under PHY lock */
            if (sc->link_vars.periodic_flags &
                ELINK_PERIODIC_FLAGS_LINK_EVENT) {
                /* sync with link */
                BXE_PHY_LOCK(sc);
                sc->link_vars.periodic_flags &=
                    ~ELINK_PERIODIC_FLAGS_LINK_EVENT;
                BXE_PHY_UNLOCK(sc);
                if (IS_MF(sc))
                    ; // XXX bxe_link_sync_notify(sc);
                bxe_link_report(sc);
            }

            /*
             * Always call it here: bxe_link_report() will
             * prevent the link indication duplication.
             */
            bxe_link_status_update(sc);

        } else if (attn & BXE_MC_ASSERT_BITS) {

            /* storm firmware assert: dump it, clear the attentions, panic */
            BLOGE(sc, "MC assert!\n");
            bxe_mc_assert(sc);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0);
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0);
            bxe_panic(sc, ("MC assert!\n"));

        } else if (attn & BXE_MCP_ASSERT) {

            /* management CPU assert: log and clear, no panic */
            BLOGE(sc, "MCP assert!\n");
            REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
            // XXX bxe_fw_dump(sc);

        } else {
            BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn);
        }
    }

    if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
        BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn);
        if (attn & BXE_GRC_TIMEOUT) {
            /* E1 has no GRC timeout attention register */
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
            BLOGE(sc, "GRC time-out 0x%08x\n", val);
        }
        if (attn & BXE_GRC_RSV) {
            val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
            BLOGE(sc, "GRC reserved 0x%08x\n", val);
        }
        /* acknowledge all latched attention signals */
        REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
    }
}

/*
 * Handle deasserted group-2 attentions: CFC, PXP and PXP2 hardware
 * interrupts, plus any fatal HW block attention in set 2. Includes the
 * CQ47854 workaround that avoids panicking when the only pending PXP2
 * status bit is a WR_PGLUE_EOP_ERROR.
 */
static void
bxe_attn_int_deasserted2(struct bxe_softc *sc,
                         uint32_t         attn)
{
    int port = SC_PORT(sc);
    int reg_offset;
    uint32_t val0, mask0, val1, mask1;
    uint32_t val;

    if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
        /* read-and-clear CFC interrupt status */
        val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
        BLOGE(sc, "CFC hw attention 0x%08x\n", val);
        /* CFC error attention */
        if (val & 0x2) {
            BLOGE(sc, "FATAL error from CFC\n");
        }
    }

    if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
        val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
        BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
        /* RQ_USDMDP_FIFO_OVERFLOW */
        if (val & 0x18000) {
            BLOGE(sc, "FATAL error from PXP\n");
        }

        /* E2+ chips have a second PXP interrupt status register */
        if (!CHIP_IS_E1x(sc)) {
            val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
            BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
        }
    }

#define PXP2_EOP_ERROR_BIT  PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
#define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT

    if (attn & AEU_PXP2_HW_INT_BIT) {
        /* CQ47854 workaround do not panic on
         * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
         */
        if (!CHIP_IS_E1x(sc)) {
            /* read masks and (non-clearing) statuses first */
            mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0);
            val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1);
            mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
            val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
            /*
             * If the only PXP2_EOP_ERROR_BIT is set in
             * STS0 and STS1 - clear it
             *
             * probably we lose additional attentions between
             * STS0 and STS_CLR0, in this case user will not
             * be notified about them
             */
            if (val0 & mask0 & PXP2_EOP_ERROR_BIT &&
                !(val1 & mask1))
                val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

            /* print the register, since no one can restore it */
            BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0);

            /*
             * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR
             * then notify
             */
            if (val0 & PXP2_EOP_ERROR_BIT) {
                BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n");

                /*
                 * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
                 * set then clear attention from PXP2 block without panic
                 */
                if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) &&
                    ((val1 & mask1) == 0))
                    attn &= ~AEU_PXP2_HW_INT_BIT;
            }
        }
    }

    if (attn & HW_INTERRUT_ASSERT_SET_2) {
        /* mask the offending set-2 attentions in the AEU enable register */
        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

        val = REG_RD(sc, reg_offset);
        val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
        REG_WR(sc, reg_offset, val);

        BLOGE(sc, "FATAL HW block attention set2 0x%x\n",
              (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2));
        bxe_panic(sc, ("HW block attention set2\n"));
    }
}

static void |
| 8434 |
bxe_attn_int_deasserted1(struct bxe_softc *sc, |
| 8435 |
uint32_t attn) |
| 8436 |
{ |
| 8437 |
int port = SC_PORT(sc); |
| 8438 |
int reg_offset; |
| 8439 |
uint32_t val; |
| 8440 |
|
| 8441 |
if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { |
| 8442 |
val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); |
| 8443 |
BLOGE(sc, "DB hw attention 0x%08x\n", val); |
| 8444 |
/* DORQ discard attention */ |
| 8445 |
if (val & 0x2) { |
| 8446 |
BLOGE(sc, "FATAL error from DORQ\n"); |
| 8447 |
} |
| 8448 |
} |
| 8449 |
|
| 8450 |
if (attn & HW_INTERRUT_ASSERT_SET_1) { |
| 8451 |
reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : |
| 8452 |
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); |
| 8453 |
|
| 8454 |
val = REG_RD(sc, reg_offset); |
| 8455 |
val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); |
| 8456 |
REG_WR(sc, reg_offset, val); |
| 8457 |
|
| 8458 |
BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", |
| 8459 |
(uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); |
| 8460 |
bxe_panic(sc, ("HW block attention set1\n")); |
| 8461 |
} |
| 8462 |
} |
| 8463 |
|
| 8464 |
static void |
| 8465 |
bxe_attn_int_deasserted0(struct bxe_softc *sc, |
| 8466 |
uint32_t attn) |
| 8467 |
{ |
| 8468 |
int port = SC_PORT(sc); |
| 8469 |
int reg_offset; |
| 8470 |
uint32_t val; |
| 8471 |
|
| 8472 |
reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
| 8473 |
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; |
| 8474 |
|
| 8475 |
if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { |
| 8476 |
val = REG_RD(sc, reg_offset); |
| 8477 |
val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; |
| 8478 |
REG_WR(sc, reg_offset, val); |
| 8479 |
|
| 8480 |
BLOGW(sc, "SPIO5 hw attention\n"); |
| 8481 |
|
| 8482 |
/* Fan failure attention */ |
| 8483 |
elink_hw_reset_phy(&sc->link_params); |
| 8484 |
bxe_fan_failure(sc); |
| 8485 |
} |
| 8486 |
|
| 8487 |
if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { |
| 8488 |
BXE_PHY_LOCK(sc); |
| 8489 |
elink_handle_module_detect_int(&sc->link_params); |
| 8490 |
BXE_PHY_UNLOCK(sc); |
| 8491 |
} |
| 8492 |
|
| 8493 |
if (attn & HW_INTERRUT_ASSERT_SET_0) { |
| 8494 |
val = REG_RD(sc, reg_offset); |
| 8495 |
val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); |
| 8496 |
REG_WR(sc, reg_offset, val); |
| 8497 |
|
| 8498 |
bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", |
| 8499 |
(attn & HW_INTERRUT_ASSERT_SET_0))); |
| 8500 |
} |
| 8501 |
} |
| 8502 |
|
| 8503 |
/*
 * Handle all newly deasserted attention bits: read the after-invert
 * attention signals, dispatch each deasserted dynamic attention group
 * to its per-register handler, then clear the bits in HC/IGU and
 * re-enable them in the AEU mask.
 *
 * Acquires the ALR (attention lock register) for the whole dispatch and
 * the per-port AEU HW lock for the mask update.
 */
static void
bxe_attn_int_deasserted(struct bxe_softc *sc,
                        uint32_t         deasserted)
{
    struct attn_route attn;
    struct attn_route *group_mask;
    int port = SC_PORT(sc);
    int index;
    uint32_t reg_addr;
    uint32_t val;
    uint32_t aeu_mask;
    uint8_t global = FALSE;

    /*
     * Need to take HW lock because MCP or other port might also
     * try to handle this event.
     */
    bxe_acquire_alr(sc);

    if (bxe_chk_parity_attn(sc, &global, TRUE)) {
        /* XXX
         * In case of parity errors don't handle attentions so that
         * other function would "see" parity errors.
         */
        sc->recovery_state = BXE_RECOVERY_INIT;
        // XXX schedule a recovery task...
        /* disable HW interrupts */
        bxe_int_disable(sc);
        bxe_release_alr(sc);
        return;
    }

    /* snapshot all (after-invert) attention signal registers */
    attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
    attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
    attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
    attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
    if (!CHIP_IS_E1x(sc)) {
        /* the fifth signal register exists on E2+ only */
        attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
    } else {
        attn.sig[4] = 0;
    }

    BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
          attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

    /* dispatch each deasserted dynamic attention group */
    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        if (deasserted & (1 << index)) {
            group_mask = &sc->attn_group[index];

            BLOGD(sc, DBG_INTR,
                  "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index,
                  group_mask->sig[0], group_mask->sig[1],
                  group_mask->sig[2], group_mask->sig[3],
                  group_mask->sig[4]);

            bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
            bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
            bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
            bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
            bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
        }
    }

    bxe_release_alr(sc);

    /* clear the handled attention bits in HC or IGU */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_addr = (HC_REG_COMMAND_REG + port*32 +
                    COMMAND_REG_ATTN_BITS_CLR);
    } else {
        reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
    }

    val = ~deasserted;
    BLOGD(sc, DBG_INTR,
          "about to mask 0x%08x at %s addr 0x%08x\n", val,
          (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
    REG_WR(sc, reg_addr, val);

    /* a deasserted bit we were not tracking indicates an IGU problem */
    if (~sc->attn_state & deasserted) {
        BLOGE(sc, "IGU error\n");
    }

    reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                      MISC_REG_AEU_MASK_ATTN_FUNC_0;

    /* the AEU mask is shared with the MCP: protect it with the HW lock */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    aeu_mask = REG_RD(sc, reg_addr);

    BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n",
          aeu_mask, deasserted);
    /* re-enable the handled attentions (only 10 dynamic group bits) */
    aeu_mask |= (deasserted & 0x3ff);
    BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask);

    REG_WR(sc, reg_addr, aeu_mask);
    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

    BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
    sc->attn_state &= ~deasserted;
    BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
}

static void |
| 8606 |
bxe_attn_int(struct bxe_softc *sc) |
| 8607 |
{ |
| 8608 |
/* read local copy of bits */ |
| 8609 |
uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); |
| 8610 |
uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); |
| 8611 |
uint32_t attn_state = sc->attn_state; |
| 8612 |
|
| 8613 |
/* look for changed bits */ |
| 8614 |
uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; |
| 8615 |
uint32_t deasserted = ~attn_bits & attn_ack & attn_state; |
| 8616 |
|
| 8617 |
BLOGD(sc, DBG_INTR, |
| 8618 |
"attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", |
| 8619 |
attn_bits, attn_ack, asserted, deasserted); |
| 8620 |
|
| 8621 |
if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { |
| 8622 |
BLOGE(sc, "BAD attention state\n"); |
| 8623 |
} |
| 8624 |
|
| 8625 |
/* handle bits that were raised */ |
| 8626 |
if (asserted) { |
| 8627 |
bxe_attn_int_asserted(sc, asserted); |
| 8628 |
} |
| 8629 |
|
| 8630 |
if (deasserted) { |
| 8631 |
bxe_attn_int_deasserted(sc, deasserted); |
| 8632 |
} |
| 8633 |
} |
| 8634 |
|
| 8635 |
static uint16_t |
| 8636 |
bxe_update_dsb_idx(struct bxe_softc *sc) |
| 8637 |
{ |
| 8638 |
struct host_sp_status_block *def_sb = sc->def_sb; |
| 8639 |
uint16_t rc = 0; |
| 8640 |
|
| 8641 |
mb(); /* status block is written to by the chip */ |
| 8642 |
|
| 8643 |
if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { |
| 8644 |
sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; |
| 8645 |
rc |= BXE_DEF_SB_ATT_IDX; |
| 8646 |
} |
| 8647 |
|
| 8648 |
if (sc->def_idx != def_sb->sp_sb.running_index) { |
| 8649 |
sc->def_idx = def_sb->sp_sb.running_index; |
| 8650 |
rc |= BXE_DEF_SB_IDX; |
| 8651 |
} |
| 8652 |
|
| 8653 |
mb(); |
| 8654 |
|
| 8655 |
return (rc); |
| 8656 |
} |
| 8657 |
|
| 8658 |
static inline struct ecore_queue_sp_obj * |
| 8659 |
bxe_cid_to_q_obj(struct bxe_softc *sc, |
| 8660 |
uint32_t cid) |
| 8661 |
{ |
| 8662 |
BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); |
| 8663 |
return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); |
| 8664 |
} |
| 8665 |
|
| 8666 |
static void |
| 8667 |
bxe_handle_mcast_eqe(struct bxe_softc *sc) |
| 8668 |
{ |
| 8669 |
struct ecore_mcast_ramrod_params rparam; |
| 8670 |
int rc; |
| 8671 |
|
| 8672 |
memset(&rparam, 0, sizeof(rparam)); |
| 8673 |
|
| 8674 |
rparam.mcast_obj = &sc->mcast_obj; |
| 8675 |
|
| 8676 |
BXE_MCAST_LOCK(sc); |
| 8677 |
|
| 8678 |
/* clear pending state for the last command */ |
| 8679 |
sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); |
| 8680 |
|
| 8681 |
/* if there are pending mcast commands - send them */ |
| 8682 |
if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { |
| 8683 |
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); |
| 8684 |
if (rc < 0) { |
| 8685 |
BLOGD(sc, DBG_SP, |
| 8686 |
"ERROR: Failed to send pending mcast commands (%d)\n", |
| 8687 |
rc); |
| 8688 |
} |
| 8689 |
} |
| 8690 |
|
| 8691 |
BXE_MCAST_UNLOCK(sc); |
| 8692 |
} |
| 8693 |
|
| 8694 |
/*
 * Process a classification (MAC/multicast rule) ramrod completion.
 * The FW echo field encodes the client ID in the low bits and the
 * pending-command type in the high bits.
 */
static void
bxe_handle_classification_eqe(struct bxe_softc      *sc,
                              union event_ring_elem *elem)
{
    unsigned long ramrod_flags = 0;
    int rc = 0;
    /* low bits of the echo are the software client ID */
    uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
    struct ecore_vlan_mac_obj *vlan_mac_obj;

    /* always push next commands out, don't wait here */
    bit_set(&ramrod_flags, RAMROD_CONT);

    /* high bits of the echo select which pending command completed */
    switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
    case ECORE_FILTER_MAC_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n");
        vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
        break;

    case ECORE_FILTER_MCAST_PENDING:
        BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n");
        /*
         * This is only relevant for 57710 where multicast MACs are
         * configured as unicast MACs using the same ramrod.
         */
        bxe_handle_mcast_eqe(sc);
        return;

    default:
        BLOGE(sc, "Unsupported classification command: %d\n",
              elem->message.data.eth_event.echo);
        return;
    }

    /* complete the command and schedule any follow-up commands */
    rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);

    if (rc < 0) {
        BLOGE(sc, "Failed to schedule new commands (%d)\n", rc);
    } else if (rc > 0) {
        BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n");
    }
}

/*
 * Process an rx_mode (filter rules) ramrod completion: drop the pending
 * flag and, if another rx_mode change was requested while this one was
 * in flight, re-issue the storm rx_mode configuration.
 */
static void
bxe_handle_rx_mode_eqe(struct bxe_softc      *sc,
                       union event_ring_elem *elem)
{
    bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);

    /* send rx_mode command again if was requested */
    if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED,
                               &sc->sp_state)) {
        bxe_set_storm_rx_mode(sc);
    }
#if 0
    else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED,
                                    &sc->sp_state)) {
        bxe_set_iscsi_eth_rx_mode(sc, TRUE);
    }
    else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
                                    &sc->sp_state)) {
        bxe_set_iscsi_eth_rx_mode(sc, FALSE);
    }
#endif
}

/*
 * Publish a new event queue producer value for this function via the
 * storm internal memory so the chip sees the updated EQ producer.
 */
static void
bxe_update_eq_prod(struct bxe_softc *sc,
                   uint16_t         prod)
{
    storm_memset_eq_prod(sc, prod, SC_FUNC(sc));
    wmb(); /* keep prod updates ordered */
}

/*
 * Drain the slowpath event queue (EQ): walk from the driver's consumer
 * to the chip's consumer index, dispatching each ramrod-completion
 * element to the matching ecore queue/function object, then credit the
 * processed entries back to eq_spq_left and publish the new producer.
 *
 * The first switch handles events independent of driver state; the
 * second switch handles events whose validity depends on sc->state
 * (opcode is OR'ed with the state to build the case labels).
 */
static void
bxe_eq_int(struct bxe_softc *sc)
{
    uint16_t hw_cons, sw_cons, sw_prod;
    union event_ring_elem *elem;
    uint8_t echo;
    uint32_t cid;
    uint8_t opcode;
    int spqe_cnt = 0;
    struct ecore_queue_sp_obj *q_obj;
    struct ecore_func_sp_obj *f_obj = &sc->func_obj;
    struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;

    hw_cons = le16toh(*sc->eq_cons_sb);

    /*
     * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
     * when we get to the next-page we need to adjust so the loop
     * condition below will be met. The next element is the size of a
     * regular element and hence incrementing by 1
     */
    if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) {
        hw_cons++;
    }

    /*
     * This function may never run in parallel with itself for a
     * specific sc and no need for a read memory barrier here.
     */
    sw_cons = sc->eq_cons;
    sw_prod = sc->eq_prod;

    BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n",
          hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));

    for (;
         sw_cons != hw_cons;
         sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {

        elem = &sc->eq[EQ_DESC(sw_cons)];

#if 0
        int rc;
        rc = bxe_iov_eq_sp_event(sc, elem);
        if (!rc) {
            BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc);
            goto next_spqe;
        }
#endif

        /* elem CID originates from FW, actually LE */
        cid = SW_CID(elem->message.data.cfc_del_event.cid);
        opcode = elem->message.opcode;

        /* handle eq element */
        switch (opcode) {
#if 0
        case EVENT_RING_OPCODE_VF_PF_CHANNEL:
            BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n");
            bxe_vf_mbx(sc, &elem->message.data.vf_pf_event);
            continue;
#endif

        case EVENT_RING_OPCODE_STAT_QUERY:
            BLOGD(sc, DBG_SP, "got statistics completion event %d\n",
                  sc->stats_comp++);
            /* nothing to do with stats comp */
            goto next_spqe;

        case EVENT_RING_OPCODE_CFC_DEL:
            /* handle according to cid range */
            /* we may want to verify here that the sc state is HALTING */
            BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid);
            q_obj = bxe_cid_to_q_obj(sc, cid);
            /* complete_cmd() returning non-zero: fall through to the
             * state-dependent switch below */
            if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_STOP_TRAFFIC:
            BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
                break;
            }
            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED);
            goto next_spqe;

        case EVENT_RING_OPCODE_START_TRAFFIC:
            BLOGD(sc, DBG_SP, "got START TRAFFIC\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
                break;
            }
            // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED);
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_UPDATE:
            /* echo distinguishes switch updates from AFEX updates */
            echo = elem->message.data.function_update_event.echo;
            if (echo == SWITCH_UPDATE) {
                BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n");
                if (f_obj->complete_cmd(sc, f_obj,
                                        ECORE_F_CMD_SWITCH_UPDATE)) {
                    break;
                }
            }
            else {
                BLOGD(sc, DBG_SP,
                      "AFEX: ramrod completed FUNCTION_UPDATE\n");
#if 0
                f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE);
                /*
                 * We will perform the queues update from the sp_core_task as
                 * all queue SP operations should run with CORE_LOCK.
                 */
                bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state);
                taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
#endif
            }
            goto next_spqe;

#if 0
        case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
            f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS);
            bxe_after_afex_vif_lists(sc, elem);
            goto next_spqe;
#endif

        case EVENT_RING_OPCODE_FORWARD_SETUP:
            q_obj = &bxe_fwd_sp_obj(sc, q_obj);
            if (q_obj->complete_cmd(sc, q_obj,
                                    ECORE_Q_CMD_SETUP_TX_ONLY)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_START:
            BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
                break;
            }
            goto next_spqe;

        case EVENT_RING_OPCODE_FUNCTION_STOP:
            BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n");
            if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
                break;
            }
            goto next_spqe;
        }

        /* state-dependent events: case labels are (opcode | state) */
        switch (opcode | sc->state) {
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT):
            cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
            BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid);
            rss_raw->clear_pending(rss_raw);
            break;

        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n");
            bxe_handle_classification_eqe(sc, elem);
            break;

        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got mcast ramrod\n");
            bxe_handle_mcast_eqe(sc);
            break;

        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG):
        case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT):
            BLOGD(sc, DBG_SP, "got rx_mode ramrod\n");
            bxe_handle_rx_mode_eqe(sc, elem);
            break;

        default:
            /* unknown event log error and continue */
            BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
                  elem->message.opcode, sc->state);
        }

next_spqe:
        spqe_cnt++;
    } /* for */

    mb();
    /* return the processed credits to the slowpath queue */
    atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);

    sc->eq_cons = sw_cons;
    sc->eq_prod = sw_prod;

    /* make sure that above mem writes were issued towards the memory */
    wmb();

    /* update producer */
    bxe_update_eq_prod(sc, sc->eq_prod);
}

/*
 * Slowpath taskqueue handler: figures out which default status block
 * indices advanced, services HW attentions and/or the event queue
 * accordingly, and re-acks the status block to re-enable interrupts.
 *
 * 'pending' is the standard taskqueue argument and is unused here.
 */
static void
bxe_handle_sp_tq(void *context,
                 int  pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)context;
    uint16_t status;

    BLOGD(sc, DBG_SP, "---> SP TASK <---\n");

    /* what work needs to be performed? */
    status = bxe_update_dsb_idx(sc);

    BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status);

    /* HW attentions */
    if (status & BXE_DEF_SB_ATT_IDX) {
        BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
        bxe_attn_int(sc);
        status &= ~BXE_DEF_SB_ATT_IDX;
    }

    /* SP events: STAT_QUERY and others */
    if (status & BXE_DEF_SB_IDX) {
        /* handle EQ completions */
        BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
        bxe_eq_int(sc);
        /* ack the slowpath index without re-enabling interrupts yet */
        bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
                   le16toh(sc->def_idx), IGU_INT_NOP, 1);
        status &= ~BXE_DEF_SB_IDX;
    }

    /* if status is non zero then something went wrong */
    if (__predict_false(status)) {
        BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status);
    }

    /* ack status block only if something was actually handled */
    bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
               le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);

    /*
     * Must be called after the EQ processing (since eq leads to sriov
     * ramrod completion flows).
     * This flow may have been scheduled by the arrival of a ramrod
     * completion, or by the sriov code rescheduling itself.
     */
    // XXX bxe_iov_sp_task(sc);

#if 0
    /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */
    if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK,
                               &sc->sp_state)) {
        bxe_link_report(sc);
        bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
    }
#endif
}

static void |
| 9030 |
bxe_handle_fp_tq(void *context, |
| 9031 |
int pending) |
| 9032 |
{ |
| 9033 |
struct bxe_fastpath *fp = (struct bxe_fastpath *)context; |
| 9034 |
struct bxe_softc *sc = fp->sc; |
| 9035 |
uint8_t more_tx = FALSE; |
| 9036 |
uint8_t more_rx = FALSE; |
| 9037 |
|
| 9038 |
BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); |
| 9039 |
|
| 9040 |
/* XXX |
| 9041 |
* IFF_DRV_RUNNING state can't be checked here since we process |
| 9042 |
* slowpath events on a client queue during setup. Instead |
| 9043 |
* we need to add a "process/continue" flag here that the driver |
| 9044 |
* can use to tell the task here not to do anything. |
| 9045 |
*/ |
| 9046 |
#if 0 |
| 9047 |
if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { |
| 9048 |
return; |
| 9049 |
} |
| 9050 |
#endif |
| 9051 |
|
| 9052 |
/* update the fastpath index */ |
| 9053 |
bxe_update_fp_sb_idx(fp); |
| 9054 |
|
| 9055 |
/* XXX add loop here if ever support multiple tx CoS */ |
| 9056 |
/* fp->txdata[cos] */ |
| 9057 |
if (bxe_has_tx_work(fp)) { |
| 9058 |
BXE_FP_TX_LOCK(fp); |
| 9059 |
more_tx = bxe_txeof(sc, fp); |
| 9060 |
BXE_FP_TX_UNLOCK(fp); |
| 9061 |
} |
| 9062 |
|
| 9063 |
if (bxe_has_rx_work(fp)) { |
| 9064 |
more_rx = bxe_rxeof(sc, fp); |
| 9065 |
} |
| 9066 |
|
| 9067 |
if (more_rx /*|| more_tx*/) { |
| 9068 |
/* still more work to do */ |
| 9069 |
taskqueue_enqueue_fast(fp->tq, &fp->tq_task); |
| 9070 |
return; |
| 9071 |
} |
| 9072 |
|
| 9073 |
bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, |
| 9074 |
le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); |
| 9075 |
} |
| 9076 |
|
| 9077 |
static void |
| 9078 |
bxe_task_fp(struct bxe_fastpath *fp) |
| 9079 |
{ |
| 9080 |
struct bxe_softc *sc = fp->sc; |
| 9081 |
uint8_t more_tx = FALSE; |
| 9082 |
uint8_t more_rx = FALSE; |
| 9083 |
|
| 9084 |
BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); |
| 9085 |
|
| 9086 |
/* update the fastpath index */ |
| 9087 |
bxe_update_fp_sb_idx(fp); |
| 9088 |
|
| 9089 |
/* XXX add loop here if ever support multiple tx CoS */ |
| 9090 |
/* fp->txdata[cos] */ |
| 9091 |
if (bxe_has_tx_work(fp)) { |
| 9092 |
BXE_FP_TX_LOCK(fp); |
| 9093 |
more_tx = bxe_txeof(sc, fp); |
| 9094 |
BXE_FP_TX_UNLOCK(fp); |
| 9095 |
} |
| 9096 |
|
| 9097 |
if (bxe_has_rx_work(fp)) { |
| 9098 |
more_rx = bxe_rxeof(sc, fp); |
| 9099 |
} |
| 9100 |
|
| 9101 |
if (more_rx /*|| more_tx*/) { |
| 9102 |
/* still more work to do, bail out if this ISR and process later */ |
| 9103 |
taskqueue_enqueue_fast(fp->tq, &fp->tq_task); |
| 9104 |
return; |
| 9105 |
} |
| 9106 |
|
| 9107 |
/* |
| 9108 |
* Here we write the fastpath index taken before doing any tx or rx work. |
| 9109 |
* It is very well possible other hw events occurred up to this point and |
| 9110 |
* they were actually processed accordingly above. Since we're going to |
| 9111 |
* write an older fastpath index, an interrupt is coming which we might |
| 9112 |
* not do any work in. |
| 9113 |
*/ |
| 9114 |
bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, |
| 9115 |
le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); |
| 9116 |
} |
| 9117 |
|
| 9118 |
/* |
| 9119 |
* Legacy interrupt entry point. |
| 9120 |
* |
| 9121 |
* Verifies that the controller generated the interrupt and |
| 9122 |
* then calls a separate routine to handle the various |
| 9123 |
* interrupt causes: link, RX, and TX. |
| 9124 |
*/ |
| 9125 |
static void |
| 9126 |
bxe_intr_legacy(void *xsc) |
| 9127 |
{ |
| 9128 |
struct bxe_softc *sc = (struct bxe_softc *)xsc; |
| 9129 |
struct bxe_fastpath *fp; |
| 9130 |
uint16_t status, mask; |
| 9131 |
int i; |
| 9132 |
|
| 9133 |
BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); |
| 9134 |
|
| 9135 |
#if 0 |
| 9136 |
/* Don't handle any interrupts if we're not ready. */ |
| 9137 |
if (__predict_false(sc->intr_sem != 0)) { |
| 9138 |
return; |
| 9139 |
} |
| 9140 |
#endif |
| 9141 |
|
| 9142 |
/* |
| 9143 |
* 0 for ustorm, 1 for cstorm |
| 9144 |
* the bits returned from ack_int() are 0-15 |
| 9145 |
* bit 0 = attention status block |
| 9146 |
* bit 1 = fast path status block |
| 9147 |
* a mask of 0x2 or more = tx/rx event |
| 9148 |
* a mask of 1 = slow path event |
| 9149 |
*/ |
| 9150 |
|
| 9151 |
status = bxe_ack_int(sc); |
| 9152 |
|
| 9153 |
/* the interrupt is not for us */ |
| 9154 |
if (__predict_false(status == 0)) { |
| 9155 |
BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); |
| 9156 |
return; |
| 9157 |
} |
| 9158 |
|
| 9159 |
BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); |
| 9160 |
|
| 9161 |
FOR_EACH_ETH_QUEUE(sc, i) { |
| 9162 |
fp = &sc->fp[i]; |
| 9163 |
mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); |
| 9164 |
if (status & mask) { |
| 9165 |
/* acknowledge and disable further fastpath interrupts */ |
| 9166 |
bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); |
| 9167 |
bxe_task_fp(fp); |
| 9168 |
status &= ~mask; |
| 9169 |
} |
| 9170 |
} |
| 9171 |
|
| 9172 |
#if 0 |
| 9173 |
if (CNIC_SUPPORT(sc)) { |
| 9174 |
mask = 0x2; |
| 9175 |
if (status & (mask | 0x1)) { |
| 9176 |
... |
| 9177 |
status &= ~mask; |
| 9178 |
} |
| 9179 |
} |
| 9180 |
#endif |
| 9181 |
|
| 9182 |
if (__predict_false(status & 0x1)) { |
| 9183 |
/* acknowledge and disable further slowpath interrupts */ |
| 9184 |
bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); |
| 9185 |
|
| 9186 |
/* schedule slowpath handler */ |
| 9187 |
taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); |
| 9188 |
|
| 9189 |
status &= ~0x1; |
| 9190 |
} |
| 9191 |
|
| 9192 |
if (__predict_false(status)) { |
| 9193 |
BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); |
| 9194 |
} |
| 9195 |
} |
| 9196 |
|
| 9197 |
/* slowpath interrupt entry point */ |
| 9198 |
static void |
| 9199 |
bxe_intr_sp(void *xsc) |
| 9200 |
{ |
| 9201 |
struct bxe_softc *sc = (struct bxe_softc *)xsc; |
| 9202 |
|
| 9203 |
BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); |
| 9204 |
|
| 9205 |
/* acknowledge and disable further slowpath interrupts */ |
| 9206 |
bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); |
| 9207 |
|
| 9208 |
/* schedule slowpath handler */ |
| 9209 |
taskqueue_enqueue_fast(sc->sp_tq, &sc->sp_tq_task); |
| 9210 |
} |
| 9211 |
|
| 9212 |
/* fastpath interrupt entry point */ |
| 9213 |
static void |
| 9214 |
bxe_intr_fp(void *xfp) |
| 9215 |
{ |
| 9216 |
struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; |
| 9217 |
struct bxe_softc *sc = fp->sc; |
| 9218 |
|
| 9219 |
BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); |
| 9220 |
|
| 9221 |
BLOGD(sc, DBG_INTR, |
| 9222 |
"(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", |
| 9223 |
curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); |
| 9224 |
|
| 9225 |
#if 0 |
| 9226 |
/* Don't handle any interrupts if we're not ready. */ |
| 9227 |
if (__predict_false(sc->intr_sem != 0)) { |
| 9228 |
return; |
| 9229 |
} |
| 9230 |
#endif |
| 9231 |
|
| 9232 |
/* acknowledge and disable further fastpath interrupts */ |
| 9233 |
bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); |
| 9234 |
|
| 9235 |
bxe_task_fp(fp); |
| 9236 |
} |
| 9237 |
|
| 9238 |
/* Release all interrupts allocated by the driver. */ |
| 9239 |
static void |
| 9240 |
bxe_interrupt_free(struct bxe_softc *sc) |
| 9241 |
{ |
| 9242 |
int i; |
| 9243 |
|
| 9244 |
switch (sc->interrupt_mode) { |
| 9245 |
case INTR_MODE_INTX: |
| 9246 |
BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); |
| 9247 |
if (sc->intr[0].resource != NULL) { |
| 9248 |
bus_release_resource(sc->dev, |
| 9249 |
SYS_RES_IRQ, |
| 9250 |
sc->intr[0].rid, |
| 9251 |
sc->intr[0].resource); |
| 9252 |
} |
| 9253 |
break; |
| 9254 |
case INTR_MODE_MSI: |
| 9255 |
for (i = 0; i < sc->intr_count; i++) { |
| 9256 |
BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); |
| 9257 |
if (sc->intr[i].resource && sc->intr[i].rid) { |
| 9258 |
bus_release_resource(sc->dev, |
| 9259 |
SYS_RES_IRQ, |
| 9260 |
sc->intr[i].rid, |
| 9261 |
sc->intr[i].resource); |
| 9262 |
} |
| 9263 |
} |
| 9264 |
pci_release_msi(sc->dev); |
| 9265 |
break; |
| 9266 |
case INTR_MODE_MSIX: |
| 9267 |
for (i = 0; i < sc->intr_count; i++) { |
| 9268 |
BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); |
| 9269 |
if (sc->intr[i].resource && sc->intr[i].rid) { |
| 9270 |
bus_release_resource(sc->dev, |
| 9271 |
SYS_RES_IRQ, |
| 9272 |
sc->intr[i].rid, |
| 9273 |
sc->intr[i].resource); |
| 9274 |
} |
| 9275 |
} |
| 9276 |
pci_release_msi(sc->dev); |
| 9277 |
break; |
| 9278 |
default: |
| 9279 |
/* nothing to do as initial allocation failed */ |
| 9280 |
break; |
| 9281 |
} |
| 9282 |
} |
| 9283 |
|
| 9284 |
/* |
| 9285 |
* This function determines and allocates the appropriate |
| 9286 |
* interrupt based on system capabilites and user request. |
| 9287 |
* |
| 9288 |
* The user may force a particular interrupt mode, specify |
| 9289 |
* the number of receive queues, specify the method for |
| 9290 |
* distribuitng received frames to receive queues, or use |
| 9291 |
* the default settings which will automatically select the |
| 9292 |
* best supported combination. In addition, the OS may or |
| 9293 |
* may not support certain combinations of these settings. |
| 9294 |
* This routine attempts to reconcile the settings requested |
| 9295 |
* by the user with the capabilites available from the system |
| 9296 |
* to select the optimal combination of features. |
| 9297 |
* |
| 9298 |
* Returns: |
| 9299 |
* 0 = Success, !0 = Failure. |
| 9300 |
*/ |
| 9301 |
static int |
| 9302 |
bxe_interrupt_alloc(struct bxe_softc *sc) |
| 9303 |
{ |
| 9304 |
int msix_count = 0; |
| 9305 |
int msi_count = 0; |
| 9306 |
int num_requested = 0; |
| 9307 |
int num_allocated = 0; |
| 9308 |
int rid, i, j; |
| 9309 |
int rc; |
| 9310 |
|
| 9311 |
/* get the number of available MSI/MSI-X interrupts from the OS */ |
| 9312 |
if (sc->interrupt_mode > 0) { |
| 9313 |
if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { |
| 9314 |
msix_count = pci_msix_count(sc->dev); |
| 9315 |
} |
| 9316 |
|
| 9317 |
if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { |
| 9318 |
msi_count = pci_msi_count(sc->dev); |
| 9319 |
} |
| 9320 |
|
| 9321 |
BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", |
| 9322 |
msi_count, msix_count); |
| 9323 |
} |
| 9324 |
|
| 9325 |
do { /* try allocating MSI-X interrupt resources (at least 2) */ |
| 9326 |
if (sc->interrupt_mode != INTR_MODE_MSIX) { |
| 9327 |
break; |
| 9328 |
} |
| 9329 |
|
| 9330 |
if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || |
| 9331 |
(msix_count < 2)) { |
| 9332 |
sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ |
| 9333 |
break; |
| 9334 |
} |
| 9335 |
|
| 9336 |
/* ask for the necessary number of MSI-X vectors */ |
| 9337 |
num_requested = min((sc->num_queues + 1), msix_count); |
| 9338 |
|
| 9339 |
BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); |
| 9340 |
|
| 9341 |
num_allocated = num_requested; |
| 9342 |
if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { |
| 9343 |
BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); |
| 9344 |
sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ |
| 9345 |
break; |
| 9346 |
} |
| 9347 |
|
| 9348 |
if (num_allocated < 2) { /* possible? */ |
| 9349 |
BLOGE(sc, "MSI-X allocation less than 2!\n"); |
| 9350 |
sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ |
| 9351 |
pci_release_msi(sc->dev); |
| 9352 |
break; |
| 9353 |
} |
| 9354 |
|
| 9355 |
BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", |
| 9356 |
num_requested, num_allocated); |
| 9357 |
|
| 9358 |
/* best effort so use the number of vectors allocated to us */ |
| 9359 |
sc->intr_count = num_allocated; |
| 9360 |
sc->num_queues = num_allocated - 1; |
| 9361 |
|
| 9362 |
rid = 1; /* initial resource identifier */ |
| 9363 |
|
| 9364 |
/* allocate the MSI-X vectors */ |
| 9365 |
for (i = 0; i < num_allocated; i++) { |
| 9366 |
sc->intr[i].rid = (rid + i); |
| 9367 |
|
| 9368 |
if ((sc->intr[i].resource = |
| 9369 |
bus_alloc_resource_any(sc->dev, |
| 9370 |
SYS_RES_IRQ, |
| 9371 |
&sc->intr[i].rid, |
| 9372 |
RF_ACTIVE)) == NULL) { |
| 9373 |
BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", |
| 9374 |
i, (rid + i)); |
| 9375 |
|
| 9376 |
for (j = (i - 1); j >= 0; j--) { |
| 9377 |
bus_release_resource(sc->dev, |
| 9378 |
SYS_RES_IRQ, |
| 9379 |
sc->intr[j].rid, |
| 9380 |
sc->intr[j].resource); |
| 9381 |
} |
| 9382 |
|
| 9383 |
sc->intr_count = 0; |
| 9384 |
sc->num_queues = 0; |
| 9385 |
sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ |
| 9386 |
pci_release_msi(sc->dev); |
| 9387 |
break; |
| 9388 |
} |
| 9389 |
|
| 9390 |
BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); |
| 9391 |
} |
| 9392 |
} while (0); |
| 9393 |
|
| 9394 |
do { /* try allocating MSI vector resources (at least 2) */ |
| 9395 |
if (sc->interrupt_mode != INTR_MODE_MSI) { |
| 9396 |
break; |
| 9397 |
} |
| 9398 |
|
| 9399 |
if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || |
| 9400 |
(msi_count < 1)) { |
| 9401 |
sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ |
| 9402 |
break; |
| 9403 |
} |
| 9404 |
|
| 9405 |
/* ask for a single MSI vector */ |
| 9406 |
num_requested = 1; |
| 9407 |
|
| 9408 |
BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); |
| 9409 |
|
| 9410 |
num_allocated = num_requested; |
| 9411 |
if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { |
| 9412 |
BLOGE(sc, "MSI alloc failed (%d)!\n", rc); |
| 9413 |
sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ |
| 9414 |
break; |
| 9415 |
} |
| 9416 |
|
| 9417 |
if (num_allocated != 1) { /* possible? */ |
| 9418 |
BLOGE(sc, "MSI allocation is not 1!\n"); |
| 9419 |
sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ |
| 9420 |
pci_release_msi(sc->dev); |
| 9421 |
break; |
| 9422 |
} |
| 9423 |
|
| 9424 |
BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", |
| 9425 |
num_requested, num_allocated); |
| 9426 |
|
| 9427 |
/* best effort so use the number of vectors allocated to us */ |
| 9428 |
sc->intr_count = num_allocated; |
| 9429 |
sc->num_queues = num_allocated; |
| 9430 |
|
| 9431 |
rid = 1; /* initial resource identifier */ |
| 9432 |
|
| 9433 |
sc->intr[0].rid = rid; |
| 9434 |
|
| 9435 |
if ((sc->intr[0].resource = |
| 9436 |
bus_alloc_resource_any(sc->dev, |
| 9437 |
SYS_RES_IRQ, |
| 9438 |
&sc->intr[0].rid, |
| 9439 |
RF_ACTIVE)) == NULL) { |
| 9440 |
BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); |
| 9441 |
sc->intr_count = 0; |
| 9442 |
sc->num_queues = 0; |
| 9443 |
sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ |
| 9444 |
pci_release_msi(sc->dev); |
| 9445 |
break; |
| 9446 |
} |
| 9447 |
|
| 9448 |
BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); |
| 9449 |
} while (0); |
| 9450 |
|
| 9451 |
do { /* try allocating INTx vector resources */ |
| 9452 |
if (sc->interrupt_mode != INTR_MODE_INTX) { |
| 9453 |
break; |
| 9454 |
} |
| 9455 |
|
| 9456 |
BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); |
| 9457 |
|
| 9458 |
/* only one vector for INTx */ |
| 9459 |
sc->intr_count = 1; |
| 9460 |
sc->num_queues = 1; |
| 9461 |
|
| 9462 |
rid = 0; /* initial resource identifier */ |
| 9463 |
|
| 9464 |
sc->intr[0].rid = rid; |
| 9465 |
|
| 9466 |
if ((sc->intr[0].resource = |
| 9467 |
bus_alloc_resource_any(sc->dev, |
| 9468 |
SYS_RES_IRQ, |
| 9469 |
&sc->intr[0].rid, |
| 9470 |
(RF_ACTIVE | RF_SHAREABLE))) == NULL) { |
| 9471 |
BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); |
| 9472 |
sc->intr_count = 0; |
| 9473 |
sc->num_queues = 0; |
| 9474 |
sc->interrupt_mode = -1; /* Failed! */ |
| 9475 |
break; |
| 9476 |
} |
| 9477 |
|
| 9478 |
BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); |
| 9479 |
} while (0); |
| 9480 |
|
| 9481 |
if (sc->interrupt_mode == -1) { |
| 9482 |
BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); |
| 9483 |
rc = 1; |
| 9484 |
} else { |
| 9485 |
BLOGD(sc, DBG_LOAD, |
| 9486 |
"Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", |
| 9487 |
sc->interrupt_mode, sc->num_queues); |
| 9488 |
rc = 0; |
| 9489 |
} |
| 9490 |
|
| 9491 |
return (rc); |
| 9492 |
} |
| 9493 |
|
| 9494 |
static void |
| 9495 |
bxe_interrupt_detach(struct bxe_softc *sc) |
| 9496 |
{ |
| 9497 |
struct bxe_fastpath *fp; |
| 9498 |
int i; |
| 9499 |
|
| 9500 |
/* release interrupt resources */ |
| 9501 |
for (i = 0; i < sc->intr_count; i++) { |
| 9502 |
if (sc->intr[i].resource && sc->intr[i].tag) { |
| 9503 |
BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); |
| 9504 |
bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); |
| 9505 |
} |
| 9506 |
} |
| 9507 |
|
| 9508 |
for (i = 0; i < sc->num_queues; i++) { |
| 9509 |
fp = &sc->fp[i]; |
| 9510 |
if (fp->tq) { |
| 9511 |
taskqueue_drain(fp->tq, &fp->tq_task); |
| 9512 |
taskqueue_free(fp->tq); |
| 9513 |
fp->tq = NULL; |
| 9514 |
} |
| 9515 |
} |
| 9516 |
|
| 9517 |
if (sc->rx_mode_tq) { |
| 9518 |
taskqueue_drain(sc->rx_mode_tq, &sc->rx_mode_tq_task); |
| 9519 |
taskqueue_free(sc->rx_mode_tq); |
| 9520 |
sc->rx_mode_tq = NULL; |
| 9521 |
} |
| 9522 |
|
| 9523 |
if (sc->sp_tq) { |
| 9524 |
taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); |
| 9525 |
taskqueue_free(sc->sp_tq); |
| 9526 |
sc->sp_tq = NULL; |
| 9527 |
} |
| 9528 |
} |
| 9529 |
|
| 9530 |
/* |
| 9531 |
* Enables interrupts and attach to the ISR. |
| 9532 |
* |
| 9533 |
* When using multiple MSI/MSI-X vectors the first vector |
| 9534 |
* is used for slowpath operations while all remaining |
| 9535 |
* vectors are used for fastpath operations. If only a |
| 9536 |
* single MSI/MSI-X vector is used (SINGLE_ISR) then the |
| 9537 |
* ISR must look for both slowpath and fastpath completions. |
| 9538 |
*/ |
| 9539 |
static int |
| 9540 |
bxe_interrupt_attach(struct bxe_softc *sc) |
| 9541 |
{ |
| 9542 |
struct bxe_fastpath *fp; |
| 9543 |
int rc = 0; |
| 9544 |
int i; |
| 9545 |
|
| 9546 |
snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), |
| 9547 |
"bxe%d_sp_tq", sc->unit); |
| 9548 |
TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); |
| 9549 |
sc->sp_tq = taskqueue_create_fast(sc->sp_tq_name, M_NOWAIT, |
| 9550 |
taskqueue_thread_enqueue, |
| 9551 |
&sc->sp_tq); |
| 9552 |
taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ |
| 9553 |
"%s", sc->sp_tq_name); |
| 9554 |
|
| 9555 |
snprintf(sc->rx_mode_tq_name, sizeof(sc->rx_mode_tq_name), |
| 9556 |
"bxe%d_rx_mode_tq", sc->unit); |
| 9557 |
TASK_INIT(&sc->rx_mode_tq_task, 0, bxe_handle_rx_mode_tq, sc); |
| 9558 |
sc->rx_mode_tq = taskqueue_create_fast(sc->rx_mode_tq_name, M_NOWAIT, |
| 9559 |
taskqueue_thread_enqueue, |
| 9560 |
&sc->rx_mode_tq); |
| 9561 |
taskqueue_start_threads(&sc->rx_mode_tq, 1, PWAIT, /* lower priority */ |
| 9562 |
"%s", sc->rx_mode_tq_name); |
| 9563 |
|
| 9564 |
for (i = 0; i < sc->num_queues; i++) { |
| 9565 |
fp = &sc->fp[i]; |
| 9566 |
snprintf(fp->tq_name, sizeof(fp->tq_name), |
| 9567 |
"bxe%d_fp%d_tq", sc->unit, i); |
| 9568 |
TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); |
| 9569 |
fp->tq = taskqueue_create_fast(fp->tq_name, M_NOWAIT, |
| 9570 |
taskqueue_thread_enqueue, |
| 9571 |
&fp->tq); |
| 9572 |
taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ |
| 9573 |
"%s", fp->tq_name); |
| 9574 |
} |
| 9575 |
|
| 9576 |
/* setup interrupt handlers */ |
| 9577 |
if (sc->interrupt_mode == INTR_MODE_MSIX) { |
| 9578 |
BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); |
| 9579 |
|
| 9580 |
/* |
| 9581 |
* Setup the interrupt handler. Note that we pass the driver instance |
| 9582 |
* to the interrupt handler for the slowpath. |
| 9583 |
*/ |
| 9584 |
if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, |
| 9585 |
(INTR_TYPE_NET | INTR_MPSAFE), |
| 9586 |
NULL, bxe_intr_sp, sc, |
| 9587 |
&sc->intr[0].tag)) != 0) { |
| 9588 |
BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); |
| 9589 |
goto bxe_interrupt_attach_exit; |
| 9590 |
} |
| 9591 |
|
| 9592 |
bus_describe_intr(sc->dev, sc->intr[0].resource, |
| 9593 |
sc->intr[0].tag, "sp"); |
| 9594 |
|
| 9595 |
/* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ |
| 9596 |
|
| 9597 |
/* initialize the fastpath vectors (note the first was used for sp) */ |
| 9598 |
for (i = 0; i < sc->num_queues; i++) { |
| 9599 |
fp = &sc->fp[i]; |
| 9600 |
BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); |
| 9601 |
|
| 9602 |
/* |
| 9603 |
* Setup the interrupt handler. Note that we pass the |
| 9604 |
* fastpath context to the interrupt handler in this |
| 9605 |
* case. |
| 9606 |
*/ |
| 9607 |
if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, |
| 9608 |
(INTR_TYPE_NET | INTR_MPSAFE), |
| 9609 |
NULL, bxe_intr_fp, fp, |
| 9610 |
&sc->intr[i + 1].tag)) != 0) { |
| 9611 |
BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", |
| 9612 |
(i + 1), rc); |
| 9613 |
goto bxe_interrupt_attach_exit; |
| 9614 |
} |
| 9615 |
|
| 9616 |
bus_describe_intr(sc->dev, sc->intr[i + 1].resource, |
| 9617 |
sc->intr[i + 1].tag, "fp%02d", i); |
| 9618 |
|
| 9619 |
/* bind the fastpath instance to a cpu */ |
| 9620 |
if (sc->num_queues > 1) { |
| 9621 |
bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); |
| 9622 |
} |
| 9623 |
|
| 9624 |
fp->state = BXE_FP_STATE_IRQ; |
| 9625 |
} |
| 9626 |
} else if (sc->interrupt_mode == INTR_MODE_MSI) { |
| 9627 |
BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); |
| 9628 |
|
| 9629 |
/* |
| 9630 |
* Setup the interrupt handler. Note that we pass the |
| 9631 |
* driver instance to the interrupt handler which |
| 9632 |
* will handle both the slowpath and fastpath. |
| 9633 |
*/ |
| 9634 |
if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, |
| 9635 |
(INTR_TYPE_NET | INTR_MPSAFE), |
| 9636 |
NULL, bxe_intr_legacy, sc, |
| 9637 |
&sc->intr[0].tag)) != 0) { |
| 9638 |
BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); |
| 9639 |
goto bxe_interrupt_attach_exit; |
| 9640 |
} |
| 9641 |
|
| 9642 |
} else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ |
| 9643 |
BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); |
| 9644 |
|
| 9645 |
/* |
| 9646 |
* Setup the interrupt handler. Note that we pass the |
| 9647 |
* driver instance to the interrupt handler which |
| 9648 |
* will handle both the slowpath and fastpath. |
| 9649 |
*/ |
| 9650 |
if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, |
| 9651 |
(INTR_TYPE_NET | INTR_MPSAFE), |
| 9652 |
NULL, bxe_intr_legacy, sc, |
| 9653 |
&sc->intr[0].tag)) != 0) { |
| 9654 |
BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); |
| 9655 |
goto bxe_interrupt_attach_exit; |
| 9656 |
} |
| 9657 |
} |
| 9658 |
|
| 9659 |
bxe_interrupt_attach_exit: |
| 9660 |
|
| 9661 |
return (rc); |
| 9662 |
} |
| 9663 |
|
| 9664 |
static int bxe_init_hw_common_chip(struct bxe_softc *sc); |
| 9665 |
static int bxe_init_hw_common(struct bxe_softc *sc); |
| 9666 |
static int bxe_init_hw_port(struct bxe_softc *sc); |
| 9667 |
static int bxe_init_hw_func(struct bxe_softc *sc); |
| 9668 |
static void bxe_reset_common(struct bxe_softc *sc); |
| 9669 |
static void bxe_reset_port(struct bxe_softc *sc); |
| 9670 |
static void bxe_reset_func(struct bxe_softc *sc); |
| 9671 |
static int bxe_gunzip_init(struct bxe_softc *sc); |
| 9672 |
static void bxe_gunzip_end(struct bxe_softc *sc); |
| 9673 |
static int bxe_init_firmware(struct bxe_softc *sc); |
| 9674 |
static void bxe_release_firmware(struct bxe_softc *sc); |
| 9675 |
|
| 9676 |
static struct |
| 9677 |
ecore_func_sp_drv_ops bxe_func_sp_drv = { |
| 9678 |
.init_hw_cmn_chip = bxe_init_hw_common_chip, |
| 9679 |
.init_hw_cmn = bxe_init_hw_common, |
| 9680 |
.init_hw_port = bxe_init_hw_port, |
| 9681 |
.init_hw_func = bxe_init_hw_func, |
| 9682 |
|
| 9683 |
.reset_hw_cmn = bxe_reset_common, |
| 9684 |
.reset_hw_port = bxe_reset_port, |
| 9685 |
.reset_hw_func = bxe_reset_func, |
| 9686 |
|
| 9687 |
.gunzip_init = bxe_gunzip_init, |
| 9688 |
.gunzip_end = bxe_gunzip_end, |
| 9689 |
|
| 9690 |
.init_fw = bxe_init_firmware, |
| 9691 |
.release_fw = bxe_release_firmware, |
| 9692 |
}; |
| 9693 |
|
| 9694 |
static void |
| 9695 |
bxe_init_func_obj(struct bxe_softc *sc) |
| 9696 |
{ |
| 9697 |
sc->dmae_ready = 0; |
| 9698 |
|
| 9699 |
ecore_init_func_obj(sc, |
| 9700 |
&sc->func_obj, |
| 9701 |
BXE_SP(sc, func_rdata), |
| 9702 |
BXE_SP_MAPPING(sc, func_rdata), |
| 9703 |
BXE_SP(sc, func_afex_rdata), |
| 9704 |
BXE_SP_MAPPING(sc, func_afex_rdata), |
| 9705 |
&bxe_func_sp_drv); |
| 9706 |
} |
| 9707 |
|
| 9708 |
static int |
| 9709 |
bxe_init_hw(struct bxe_softc *sc, |
| 9710 |
uint32_t load_code) |
| 9711 |
{ |
| 9712 |
struct ecore_func_state_params func_params = { NULL }; |
| 9713 |
int rc; |
| 9714 |
|
| 9715 |
/* prepare the parameters for function state transitions */ |
| 9716 |
bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); |
| 9717 |
|
| 9718 |
func_params.f_obj = &sc->func_obj; |
| 9719 |
func_params.cmd = ECORE_F_CMD_HW_INIT; |
| 9720 |
|
| 9721 |
func_params.params.hw_init.load_phase = load_code; |
| 9722 |
|
| 9723 |
/* |
| 9724 |
* Via a plethora of function pointers, we will eventually reach |
| 9725 |
* bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). |
| 9726 |
*/ |
| 9727 |
rc = ecore_func_state_change(sc, &func_params); |
| 9728 |
|
| 9729 |
return (rc); |
| 9730 |
} |
| 9731 |
|
| 9732 |
static void |
| 9733 |
bxe_fill(struct bxe_softc *sc, |
| 9734 |
uint32_t addr, |
| 9735 |
int fill, |
| 9736 |
uint32_t len) |
| 9737 |
{ |
| 9738 |
uint32_t i; |
| 9739 |
|
| 9740 |
if (!(len % 4) && !(addr % 4)) { |
| 9741 |
for (i = 0; i < len; i += 4) { |
| 9742 |
REG_WR(sc, (addr + i), fill); |
| 9743 |
} |
| 9744 |
} else { |
| 9745 |
for (i = 0; i < len; i++) { |
| 9746 |
REG_WR8(sc, (addr + i), fill); |
| 9747 |
} |
| 9748 |
} |
| 9749 |
} |
| 9750 |
|
| 9751 |
/* writes FP SP data to FW - data_size in dwords */ |
| 9752 |
static void |
| 9753 |
bxe_wr_fp_sb_data(struct bxe_softc *sc, |
| 9754 |
int fw_sb_id, |
| 9755 |
uint32_t *sb_data_p, |
| 9756 |
uint32_t data_size) |
| 9757 |
{ |
| 9758 |
int index; |
| 9759 |
|
| 9760 |
for (index = 0; index < data_size; index++) { |
| 9761 |
REG_WR(sc, |
| 9762 |
(BAR_CSTRORM_INTMEM + |
| 9763 |
CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + |
| 9764 |
(sizeof(uint32_t) * index)), |
| 9765 |
*(sb_data_p + index)); |
| 9766 |
} |
| 9767 |
} |
| 9768 |
|
| 9769 |
static void |
| 9770 |
bxe_zero_fp_sb(struct bxe_softc *sc, |
| 9771 |
int fw_sb_id) |
| 9772 |
{ |
| 9773 |
struct hc_status_block_data_e2 sb_data_e2; |
| 9774 |
struct hc_status_block_data_e1x sb_data_e1x; |
| 9775 |
uint32_t *sb_data_p; |
| 9776 |
uint32_t data_size = 0; |
| 9777 |
|
| 9778 |
if (!CHIP_IS_E1x(sc)) { |
| 9779 |
memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); |
| 9780 |
sb_data_e2.common.state = SB_DISABLED; |
| 9781 |
sb_data_e2.common.p_func.vf_valid = FALSE; |
| 9782 |
sb_data_p = (uint32_t *)&sb_data_e2; |
| 9783 |
data_size = (sizeof(struct hc_status_block_data_e2) / |
| 9784 |
sizeof(uint32_t)); |
| 9785 |
} else { |
| 9786 |
memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); |
| 9787 |
sb_data_e1x.common.state = SB_DISABLED; |
| 9788 |
sb_data_e1x.common.p_func.vf_valid = FALSE; |
| 9789 |
sb_data_p = (uint32_t *)&sb_data_e1x; |
| 9790 |
data_size = (sizeof(struct hc_status_block_data_e1x) / |
| 9791 |
sizeof(uint32_t)); |
| 9792 |
} |
| 9793 |
|
| 9794 |
bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); |
| 9795 |
|
| 9796 |
bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), |
| 9797 |
0, CSTORM_STATUS_BLOCK_SIZE); |
| 9798 |
bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), |
| 9799 |
0, CSTORM_SYNC_BLOCK_SIZE); |
| 9800 |
} |
| 9801 |
|
| 9802 |
static void |
| 9803 |
bxe_wr_sp_sb_data(struct bxe_softc *sc, |
| 9804 |
struct hc_sp_status_block_data *sp_sb_data) |
| 9805 |
{ |
| 9806 |
int i; |
| 9807 |
|
| 9808 |
for (i = 0; |
| 9809 |
i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); |
| 9810 |
i++) { |
| 9811 |
REG_WR(sc, |
| 9812 |
(BAR_CSTRORM_INTMEM + |
| 9813 |
CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + |
| 9814 |
(i * sizeof(uint32_t))), |
| 9815 |
*((uint32_t *)sp_sb_data + i)); |
| 9816 |
} |
| 9817 |
} |
| 9818 |
|
| 9819 |
static void |
| 9820 |
bxe_zero_sp_sb(struct bxe_softc *sc) |
| 9821 |
{ |
| 9822 |
struct hc_sp_status_block_data sp_sb_data; |
| 9823 |
|
| 9824 |
memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); |
| 9825 |
|
| 9826 |
sp_sb_data.state = SB_DISABLED; |
| 9827 |
sp_sb_data.p_func.vf_valid = FALSE; |
| 9828 |
|
| 9829 |
bxe_wr_sp_sb_data(sc, &sp_sb_data); |
| 9830 |
|
| 9831 |
bxe_fill(sc, |
| 9832 |
(BAR_CSTRORM_INTMEM + |
| 9833 |
CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), |
| 9834 |
0, CSTORM_SP_STATUS_BLOCK_SIZE); |
| 9835 |
bxe_fill(sc, |
| 9836 |
(BAR_CSTRORM_INTMEM + |
| 9837 |
CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), |
| 9838 |
0, CSTORM_SP_SYNC_BLOCK_SIZE); |
| 9839 |
} |
| 9840 |
|
| 9841 |
static void |
| 9842 |
bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, |
| 9843 |
int igu_sb_id, |
| 9844 |
int igu_seg_id) |
| 9845 |
{ |
| 9846 |
hc_sm->igu_sb_id = igu_sb_id; |
| 9847 |
hc_sm->igu_seg_id = igu_seg_id; |
| 9848 |
hc_sm->timer_value = 0xFF; |
| 9849 |
hc_sm->time_to_expire = 0xFFFFFFFF; |
| 9850 |
} |
| 9851 |
|
| 9852 |
static void |
| 9853 |
bxe_map_sb_state_machines(struct hc_index_data *index_data) |
| 9854 |
{ |
| 9855 |
/* zero out state machine indices */ |
| 9856 |
|
| 9857 |
/* rx indices */ |
| 9858 |
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; |
| 9859 |
|
| 9860 |
/* tx indices */ |
| 9861 |
index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; |
| 9862 |
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; |
| 9863 |
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; |
| 9864 |
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; |
| 9865 |
|
| 9866 |
/* map indices */ |
| 9867 |
|
| 9868 |
/* rx indices */ |
| 9869 |
index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= |
| 9870 |
(SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); |
| 9871 |
|
| 9872 |
/* tx indices */ |
| 9873 |
index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= |
| 9874 |
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); |
| 9875 |
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= |
| 9876 |
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); |
| 9877 |
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= |
| 9878 |
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); |
| 9879 |
index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= |
| 9880 |
(SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); |
| 9881 |
} |
| 9882 |
|
| 9883 |
/*
 * Initialize a fastpath status block and push its configuration
 * (host address, function/VF ids, state machine setup) to the chip's
 * internal memory.
 *
 *   busaddr   - bus address of the host status block
 *   vfid      - VF id to program (used on E2+ only)
 *   vf_valid  - whether vfid is valid (E2+ only)
 *   fw_sb_id  - firmware status block id
 *   igu_sb_id - IGU status block id
 */
static void
bxe_init_sb(struct bxe_softc *sc,
            bus_addr_t       busaddr,
            int              vfid,
            uint8_t          vf_valid,
            int              fw_sb_id,
            int              igu_sb_id)
{
    struct hc_status_block_data_e2 sb_data_e2;
    struct hc_status_block_data_e1x sb_data_e1x;
    struct hc_status_block_sm *hc_sm_p;
    uint32_t *sb_data_p;
    int igu_seg_id;
    int data_size;

    /* backward-compatible (HC) interrupt mode uses the HC segment */
    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_seg_id = HC_SEG_ACCESS_NORM;
    } else {
        igu_seg_id = IGU_SEG_ACCESS_NORM;
    }

    /* clear the status block in internal memory before re-programming */
    bxe_zero_fp_sb(sc, fw_sb_id);

    if (!CHIP_IS_E1x(sc)) {
        /* E2+ status block layout (carries VF info) */
        memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
        sb_data_e2.common.state = SB_ENABLED;
        sb_data_e2.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e2.common.p_func.vf_id = vfid;
        sb_data_e2.common.p_func.vf_valid = vf_valid;
        sb_data_e2.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e2.common.same_igu_sb_1b = TRUE;
        sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e2.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e2;
        data_size = (sizeof(struct hc_status_block_data_e2) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e2.index_data);
    } else {
        /* E1/E1H status block layout (no VFs: vf_id forced invalid) */
        memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x));
        sb_data_e1x.common.state = SB_ENABLED;
        sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc);
        sb_data_e1x.common.p_func.vf_id = 0xff;
        sb_data_e1x.common.p_func.vf_valid = FALSE;
        sb_data_e1x.common.p_func.vnic_id = SC_VN(sc);
        sb_data_e1x.common.same_igu_sb_1b = TRUE;
        sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr);
        sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr);
        hc_sm_p = sb_data_e1x.common.state_machine;
        sb_data_p = (uint32_t *)&sb_data_e1x;
        data_size = (sizeof(struct hc_status_block_data_e1x) /
                     sizeof(uint32_t));
        bxe_map_sb_state_machines(sb_data_e1x.index_data);
    }

    /* program the RX and TX state machines with the IGU ids/segment */
    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id);
    bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id);

    BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id);

    /* write indices to HW - PCI guarantees endianity of regpairs */
    bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size);
}
| 9946 |
|
| 9947 |
static inline uint8_t |
| 9948 |
bxe_fp_qzone_id(struct bxe_fastpath *fp) |
| 9949 |
{ |
| 9950 |
if (CHIP_IS_E1x(fp->sc)) { |
| 9951 |
return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); |
| 9952 |
} else { |
| 9953 |
return (fp->cl_id); |
| 9954 |
} |
| 9955 |
} |
| 9956 |
|
| 9957 |
static inline uint32_t |
| 9958 |
bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, |
| 9959 |
struct bxe_fastpath *fp) |
| 9960 |
{ |
| 9961 |
uint32_t offset = BAR_USTRORM_INTMEM; |
| 9962 |
|
| 9963 |
#if 0 |
| 9964 |
if (IS_VF(sc)) { |
| 9965 |
return (PXP_VF_ADDR_USDM_QUEUES_START + |
| 9966 |
(sc->acquire_resp.resc.hw_qid[fp->index] * |
| 9967 |
sizeof(struct ustorm_queue_zone_data))); |
| 9968 |
} else |
| 9969 |
#endif |
| 9970 |
if (!CHIP_IS_E1x(sc)) { |
| 9971 |
offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); |
| 9972 |
} else { |
| 9973 |
offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); |
| 9974 |
} |
| 9975 |
|
| 9976 |
return (offset); |
| 9977 |
} |
| 9978 |
|
| 9979 |
/*
 * Initialize ethernet fastpath 'idx': TX/RX locks, status block and
 * client ids, status block shortcut pointers, and - for a PF only -
 * the HW status block plus the ECORE queue and MAC state objects.
 */
static void
bxe_init_eth_fp(struct bxe_softc *sc,
                int              idx)
{
    struct bxe_fastpath *fp = &sc->fp[idx];
    uint32_t cids[ECORE_MULTI_TX_COS] = { 0 };
    unsigned long q_type = 0;
    int cos;

    fp->sc = sc;
    fp->index = idx;

    /* per-fastpath TX and RX mutexes, named for debugging */
    snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
             "bxe%d_fp%d_tx_lock", sc->unit, idx);
    mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

    snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
             "bxe%d_fp%d_rx_lock", sc->unit, idx);
    mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);

    /* CNIC (if compiled in) occupies the slot right after the base ids */
    fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
    fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));

    fp->cl_id = (CHIP_IS_E1x(sc)) ?
                    (SC_L_ID(sc) + idx) :
                    /* want client ID same as IGU SB ID for non-E1 */
                    fp->igu_sb_id;
    fp->cl_qzone_id = bxe_fp_qzone_id(fp);

    /* setup sb indices (layout differs between E1x and E2+) */
    if (!CHIP_IS_E1x(sc)) {
        fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
    } else {
        fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
        fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
    }

    /* init shortcut to the USTORM RX producers location */
    fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);

    fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];

    /*
     * XXX If multiple CoS is ever supported then each fastpath structure
     * will need to maintain tx producer/consumer/dma/etc values *per* CoS.
     */
    for (cos = 0; cos < sc->max_cos; cos++) {
        cids[cos] = idx;
    }
    fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];

    /* nothing more for a VF to do */
    if (IS_VF(sc)) {
        return;
    }

    /* program the HW status block (PF only) */
    bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
                fp->fw_sb_id, fp->igu_sb_id);

    bxe_update_fp_sb_idx(fp);

    /* Configure Queue State object */
    bit_set(&q_type, ECORE_Q_TYPE_HAS_RX);
    bit_set(&q_type, ECORE_Q_TYPE_HAS_TX);

    ecore_init_queue_obj(sc,
                         &sc->sp_objs[idx].q_obj,
                         fp->cl_id,
                         cids,
                         sc->max_cos,
                         SC_FUNC(sc),
                         BXE_SP(sc, q_rdata),
                         BXE_SP_MAPPING(sc, q_rdata),
                         q_type);

    /* configure classification DBs */
    ecore_init_mac_obj(sc,
                       &sc->sp_objs[idx].mac_obj,
                       fp->cl_id,
                       idx,
                       SC_FUNC(sc),
                       BXE_SP(sc, mac_rdata),
                       BXE_SP_MAPPING(sc, mac_rdata),
                       ECORE_FILTER_MAC_PENDING,
                       &sc->sp_state,
                       ECORE_OBJ_TYPE_RX_TX,
                       &sc->macs_pool);

    BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n",
          idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
}
| 10071 |
|
| 10072 |
/*
 * Publish new RX producer values (BD ring, completion ring, SGE ring)
 * for a fastpath by writing them word-by-word into USTORM internal
 * memory, with write barriers around the update so the firmware never
 * sees producers ahead of the buffer data.
 */
static inline void
bxe_update_rx_prod(struct bxe_softc    *sc,
                   struct bxe_fastpath *fp,
                   uint16_t            rx_bd_prod,
                   uint16_t            rx_cq_prod,
                   uint16_t            rx_sge_prod)
{
    struct ustorm_eth_rx_producers rx_prods = { 0 };
    uint32_t i;

    /* update producers */
    rx_prods.bd_prod = rx_bd_prod;
    rx_prods.cqe_prod = rx_cq_prod;
    rx_prods.sge_prod = rx_sge_prod;

    /*
     * Make sure that the BD and SGE data is updated before updating the
     * producers since FW might read the BD/SGE right after the producer
     * is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since FW will
     * assumes BDs must have buffers.
     */
    wmb();

    /* copy the producers struct one 32-bit word at a time */
    for (i = 0; i < (sizeof(rx_prods) / 4); i++) {
        REG_WR(sc,
               (fp->ustorm_rx_prods_offset + (i * 4)),
               ((uint32_t *)&rx_prods)[i]);
    }

    wmb(); /* keep prod updates ordered */

    BLOGD(sc, DBG_RX,
          "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n",
          fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
}
| 10109 |
|
| 10110 |
/*
 * Activate all RX rings by publishing each fastpath's initial
 * producers. On E1 (57710) additionally program the firmware memory
 * workaround address with fp[0]'s RCQ bus address.
 */
static void
bxe_init_rx_rings(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        fp->rx_bd_cons = 0;

        /*
         * Activate the BD ring...
         * Warning, this will generate an interrupt (to the TSTORM)
         * so this can only be done after the chip is initialized
         */
        bxe_update_rx_prod(sc, fp,
                           fp->rx_bd_prod,
                           fp->rx_cq_prod,
                           fp->rx_sge_prod);

        /* the E1 workaround below applies to fp[0] only */
        if (i != 0) {
            continue;
        }

        if (CHIP_IS_E1(sc)) {
            /* lo/hi halves of the RCQ address, 4 bytes apart */
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))),
                   U64_LO(fp->rcq_dma.paddr));
            REG_WR(sc,
                   (BAR_USTRORM_INTMEM +
                    USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4),
                   U64_HI(fp->rcq_dma.paddr));
        }
    }
}
| 10147 |
|
| 10148 |
static void |
| 10149 |
bxe_init_tx_ring_one(struct bxe_fastpath *fp) |
| 10150 |
{ |
| 10151 |
SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); |
| 10152 |
fp->tx_db.data.zero_fill1 = 0; |
| 10153 |
fp->tx_db.data.prod = 0; |
| 10154 |
|
| 10155 |
fp->tx_pkt_prod = 0; |
| 10156 |
fp->tx_pkt_cons = 0; |
| 10157 |
fp->tx_bd_prod = 0; |
| 10158 |
fp->tx_bd_cons = 0; |
| 10159 |
fp->eth_q_stats.tx_pkts = 0; |
| 10160 |
} |
| 10161 |
|
| 10162 |
static inline void |
| 10163 |
bxe_init_tx_rings(struct bxe_softc *sc) |
| 10164 |
{ |
| 10165 |
int i; |
| 10166 |
|
| 10167 |
for (i = 0; i < sc->num_queues; i++) { |
| 10168 |
#if 0 |
| 10169 |
uint8_t cos; |
| 10170 |
for (cos = 0; cos < sc->max_cos; cos++) { |
| 10171 |
bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); |
| 10172 |
} |
| 10173 |
#else |
| 10174 |
bxe_init_tx_ring_one(&sc->fp[i]); |
| 10175 |
#endif |
| 10176 |
} |
| 10177 |
} |
| 10178 |
|
| 10179 |
/*
 * Initialize the default (slowpath) status block: reset the attention
 * state, snapshot the AEU attention-group enable masks, program the
 * attention message address, and write the SP status block data to the
 * chip, finishing with an IGU ack that enables the default SB.
 */
static void
bxe_init_def_sb(struct bxe_softc *sc)
{
    struct host_sp_status_block *def_sb = sc->def_sb;
    bus_addr_t mapping = sc->def_sb_dma.paddr;
    int igu_sp_sb_index;
    int igu_seg_id;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int reg_offset, reg_offset_en5;
    uint64_t section;
    int index, sindex;
    struct hc_sp_status_block_data sp_sb_data;

    memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

    /* backward-compatible (HC) mode uses the fixed default SB id/segment */
    if (CHIP_INT_MODE_IS_BC(sc)) {
        igu_sp_sb_index = DEF_SB_IGU_ID;
        igu_seg_id = HC_SEG_ACCESS_DEF;
    } else {
        igu_sp_sb_index = sc->igu_dsb_id;
        igu_seg_id = IGU_SEG_ACCESS_DEF;
    }

    /* attentions */
    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, atten_status_block));
    def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
    sc->attn_state = 0;

    reg_offset = (port) ?
                     MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
    reg_offset_en5 = (port) ?
                         MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
                         MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0;

    /* cache the AEU enable masks for each dynamic attention group */
    for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
        /* take care of sig[0]..sig[4] */
        for (sindex = 0; sindex < 4; sindex++) {
            sc->attn_group[index].sig[sindex] =
                REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index)));
        }

        if (!CHIP_IS_E1x(sc)) {
            /*
             * enable5 is separate from the rest of the registers,
             * and the address skip is 4 and not 16 between the
             * different groups
             */
            sc->attn_group[index].sig[4] =
                REG_RD(sc, (reg_offset_en5 + (0x4 * index)));
        } else {
            sc->attn_group[index].sig[4] = 0;
        }
    }

    /* tell the interrupt block where attention messages live */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        reg_offset = (port) ?
                         HC_REG_ATTN_MSG1_ADDR_L :
                         HC_REG_ATTN_MSG0_ADDR_L;
        REG_WR(sc, reg_offset, U64_LO(section));
        REG_WR(sc, (reg_offset + 4), U64_HI(section));
    } else if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
        REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
    }

    section = ((uint64_t)mapping +
               offsetof(struct host_sp_status_block, sp_sb));

    bxe_zero_sp_sb(sc);

    /* PCI guarantees endianity of regpair */
    sp_sb_data.state = SB_ENABLED;
    sp_sb_data.host_sb_addr.lo = U64_LO(section);
    sp_sb_data.host_sb_addr.hi = U64_HI(section);
    sp_sb_data.igu_sb_id = igu_sp_sb_index;
    sp_sb_data.igu_seg_id = igu_seg_id;
    sp_sb_data.p_func.pf_id = func;
    sp_sb_data.p_func.vnic_id = SC_VN(sc);
    sp_sb_data.p_func.vf_id = 0xff;

    bxe_wr_sp_sb_data(sc, &sp_sb_data);

    /* ack and enable the default status block in the IGU */
    bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
| 10266 |
|
| 10267 |
/*
 * Initialize the slowpath (SP) queue: full credit of pending SPQ
 * slots, producer at the start of the ring, and a shortcut pointer to
 * the default SB index that tracks ETH SP completions.
 */
static void
bxe_init_sp_ring(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
    sc->spq_prod_idx = 0;
    sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
    sc->spq_prod_bd = sc->spq;
    /* one past the last usable BD; used for wrap detection */
    sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
}
| 10276 |
|
| 10277 |
/*
 * Initialize the event queue (EQ): chain the last element of each page
 * to the start of the next page (wrapping at the end), reset the
 * consumer/producer, and set the initial credit of EQ entries.
 */
static void
bxe_init_eq_ring(struct bxe_softc *sc)
{
    union event_ring_elem *elem;
    int i;

    for (i = 1; i <= NUM_EQ_PAGES; i++) {
        /* last element in page i-1 becomes the next-page pointer */
        elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];

        elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
        elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
                                                 BCM_PAGE_SIZE *
                                                 (i % NUM_EQ_PAGES)));
    }

    sc->eq_cons = 0;
    sc->eq_prod = NUM_EQ_DESC;
    sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];

    /* credit is bounded by both the SPQ headroom and the EQ size, minus one */
    atomic_store_rel_long(&sc->eq_spq_left,
                          (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
                               NUM_EQ_DESC) - 1));
}
| 10302 |
|
| 10303 |
/*
 * One-time internal-memory initialization, performed only by the
 * driver instance that received the COMMON load response from the MCP.
 */
static void
bxe_init_internal_common(struct bxe_softc *sc)
{
    int i;

    if (IS_MF_SI(sc)) {
        /*
         * In switch independent mode, the TSTORM needs to accept
         * packets that failed classification, since approximate match
         * mac addresses aren't written to NIG LLH.
         */
        REG_WR8(sc,
                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
                2);
    } else if (!CHIP_IS_E1(sc)) { /* 57710 doesn't support MF */
        REG_WR8(sc,
                (BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET),
                0);
    }

    /*
     * Zero this manually as its initialization is currently missing
     * in the initTool.
     */
    for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) {
        REG_WR(sc,
               (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)),
               0);
    }

    if (!CHIP_IS_E1x(sc)) {
        /* tell CSTORM whether the IGU runs in backward-compatible mode */
        REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET),
                CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
    }
}
| 10338 |
|
| 10339 |
/*
 * Dispatch internal-memory initialization based on the load response
 * from the MCP. The COMMON cases deliberately fall through: a COMMON
 * load also implies PORT- and FUNCTION-level initialization.
 */
static void
bxe_init_internal(struct bxe_softc *sc,
                  uint32_t         load_code)
{
    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
    case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
        bxe_init_internal_common(sc);
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_PORT:
        /* nothing to do */
        /* no break */

    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
        /* internal memory per function is initialized inside bxe_pf_init */
        break;

    default:
        BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code);
        break;
    }
}
| 10362 |
|
| 10363 |
static void |
| 10364 |
storm_memset_func_cfg(struct bxe_softc *sc, |
| 10365 |
struct tstorm_eth_function_common_config *tcfg, |
| 10366 |
uint16_t abs_fid) |
| 10367 |
{ |
| 10368 |
uint32_t addr; |
| 10369 |
size_t size; |
| 10370 |
|
| 10371 |
addr = (BAR_TSTRORM_INTMEM + |
| 10372 |
TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); |
| 10373 |
size = sizeof(struct tstorm_eth_function_common_config); |
| 10374 |
ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); |
| 10375 |
} |
| 10376 |
|
| 10377 |
/*
 * Enable this function in the firmware and, if requested via
 * FUNC_FLG_SPQ, program its slowpath queue address and producer.
 */
static void
bxe_func_init(struct bxe_softc            *sc,
              struct bxe_func_init_params *p)
{
    struct tstorm_eth_function_common_config tcfg = { 0 };

    /* on E1x, write a zeroed per-function common config to TSTORM */
    if (CHIP_IS_E1x(sc)) {
        storm_memset_func_cfg(sc, &tcfg, p->func_id);
    }

    /* Enable the function in the FW */
    storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
    storm_memset_func_en(sc, p->func_id, 1);

    /* spq */
    if (p->func_flgs & FUNC_FLG_SPQ) {
        storm_memset_spq_addr(sc, p->spq_map, p->func_id);
        REG_WR(sc,
               (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
               p->spq_prod);
    }
}
| 10399 |
|
| 10400 |
/*
 * Calculates the sum of vn_min_rates.
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates.
 *   or
 *   0 - if all the min_rates are 0.
 * In the latter case the fairness algorithm should be deactivated.
 * If all min rates are not zero then those that are zeroes will be set to 1.
 */
| 10410 |
static void |
| 10411 |
bxe_calc_vn_min(struct bxe_softc *sc, |
| 10412 |
struct cmng_init_input *input) |
| 10413 |
{ |
| 10414 |
uint32_t vn_cfg; |
| 10415 |
uint32_t vn_min_rate; |
| 10416 |
int all_zero = 1; |
| 10417 |
int vn; |
| 10418 |
|
| 10419 |
for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { |
| 10420 |
vn_cfg = sc->devinfo.mf_info.mf_config[vn]; |
| 10421 |
vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
| 10422 |
FUNC_MF_CFG_MIN_BW_SHIFT) * 100); |
| 10423 |
|
| 10424 |
if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { |
| 10425 |
/* skip hidden VNs */ |
| 10426 |
vn_min_rate = 0; |
| 10427 |
} else if (!vn_min_rate) { |
| 10428 |
/* If min rate is zero - set it to 100 */ |
| 10429 |
vn_min_rate = DEF_MIN_RATE; |
| 10430 |
} else { |
| 10431 |
all_zero = 0; |
| 10432 |
} |
| 10433 |
|
| 10434 |
input->vnic_min_rate[vn] = vn_min_rate; |
| 10435 |
} |
| 10436 |
|
| 10437 |
/* if ETS or all min rates are zeros - disable fairness */ |
| 10438 |
if (BXE_IS_ETS_ENABLED(sc)) { |
| 10439 |
input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
| 10440 |
BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n"); |
| 10441 |
} else if (all_zero) { |
| 10442 |
input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
| 10443 |
BLOGD(sc, DBG_LOAD, |
| 10444 |
"Fariness disabled (all MIN values are zeroes)\n"); |
| 10445 |
} else { |
| 10446 |
input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN; |
| 10447 |
} |
| 10448 |
} |
| 10449 |
|
| 10450 |
static inline uint16_t |
| 10451 |
bxe_extract_max_cfg(struct bxe_softc *sc, |
| 10452 |
uint32_t mf_cfg) |
| 10453 |
{ |
| 10454 |
uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> |
| 10455 |
FUNC_MF_CFG_MAX_BW_SHIFT); |
| 10456 |
|
| 10457 |
if (!max_cfg) { |
| 10458 |
BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n"); |
| 10459 |
max_cfg = 100; |
| 10460 |
} |
| 10461 |
|
| 10462 |
return (max_cfg); |
| 10463 |
} |
| 10464 |
|
| 10465 |
static void |
| 10466 |
bxe_calc_vn_max(struct bxe_softc *sc, |
| 10467 |
int vn, |
| 10468 |
struct cmng_init_input *input) |
| 10469 |
{ |
| 10470 |
uint16_t vn_max_rate; |
| 10471 |
uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn]; |
| 10472 |
uint32_t max_cfg; |
| 10473 |
|
| 10474 |
if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) { |
| 10475 |
vn_max_rate = 0; |
| 10476 |
} else { |
| 10477 |
max_cfg = bxe_extract_max_cfg(sc, vn_cfg); |
| 10478 |
|
| 10479 |
if (IS_MF_SI(sc)) { |
| 10480 |
/* max_cfg in percents of linkspeed */ |
| 10481 |
vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100); |
| 10482 |
} else { /* SD modes */ |
| 10483 |
/* max_cfg is absolute in 100Mb units */ |
| 10484 |
vn_max_rate = (max_cfg * 100); |
| 10485 |
} |
| 10486 |
} |
| 10487 |
|
| 10488 |
BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate); |
| 10489 |
|
| 10490 |
input->vnic_max_rate[vn] = vn_max_rate; |
| 10491 |
} |
| 10492 |
|
| 10493 |
/*
 * Initialize congestion-management parameters for this port.
 * 'read_cfg' forces a re-read of the MF configuration from shmem;
 * 'cmng_type' selects the algorithm - only CMNG_FNS_MINMAX actually
 * configures anything, other values leave rate shaping/fairness off.
 */
static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t          read_cfg,
                  uint8_t          cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate (only the PMF computes these) */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);
        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}
| 10531 |
|
| 10532 |
static int |
| 10533 |
bxe_get_cmng_fns_mode(struct bxe_softc *sc) |
| 10534 |
{ |
| 10535 |
if (CHIP_REV_IS_SLOW(sc)) { |
| 10536 |
return (CMNG_FNS_NONE); |
| 10537 |
} |
| 10538 |
|
| 10539 |
if (IS_MF(sc)) { |
| 10540 |
return (CMNG_FNS_MINMAX); |
| 10541 |
} |
| 10542 |
|
| 10543 |
return (CMNG_FNS_NONE); |
| 10544 |
} |
| 10545 |
|
| 10546 |
/*
 * Write the congestion-management configuration into XSTORM internal
 * memory: the per-port struct, then the per-VN rate-shaping and
 * fairness variables for every VN's function on this port.
 */
static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t          port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    /* per-port variables */
    addr = (BAR_XSTRORM_INTMEM +
            XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func = func_by_vn(sc, vn);

        /* rate-shaping variables for this VN's function */
        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct rate_shaping_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);

        /* fairness variables for this VN's function */
        addr = (BAR_XSTRORM_INTMEM +
                XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func));
        size = sizeof(struct fairness_vars_per_vn);
        ecore_storm_memset_struct(sc, addr, size,
                                  (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
    }
}
| 10577 |
|
| 10578 |
/*
 * PF-level firmware initialization: reset the IGU PF statistics
 * (E2+ only), enable this function in the firmware with SPQ/stats/
 * leading flags, set initial congestion-management values (assuming a
 * 10Gbps link until the real link comes up), and program the event
 * queue parameters.
 */
static void
bxe_pf_init(struct bxe_softc *sc)
{
    struct bxe_func_init_params func_init = { 0 };
    struct event_ring_data eq_data = { { 0 } };
    uint16_t flags;

    if (!CHIP_IS_E1x(sc)) {
        /* reset IGU PF statistics: MSIX + ATTN */
        /* PF */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
        /* ATTN */
        REG_WR(sc,
               (IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
                (BXE_IGU_STAS_MSG_VF_CNT * 4) +
                (BXE_IGU_STAS_MSG_PF_CNT * 4) +
                ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)),
               0);
    }

    /* function setup flags */
    flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

    /*
     * This flag is relevant for E1x only.
     * E2 doesn't have a TPA configuration in a function level.
     */
    flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;

    func_init.func_flgs = flags;
    func_init.pf_id = SC_FUNC(sc);
    func_init.func_id = SC_FUNC(sc);
    func_init.spq_map = sc->spq_dma.paddr;
    func_init.spq_prod = sc->spq_prod_idx;

    bxe_func_init(sc, &func_init);

    memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));

    /*
     * Congestion management values depend on the link rate.
     * There is no active link so initial link rate is set to 10Gbps.
     * When the link comes up the congestion management values are
     * re-calculated according to the actual link rate.
     */
    sc->link_vars.line_speed = SPEED_10000;
    bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc));

    /* Only the PMF sets the HW */
    if (sc->port.pmf) {
        storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
    }

    /* init Event Queue - PCI bus guarantees correct endianity */
    eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
    eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
    eq_data.producer = sc->eq_prod;
    eq_data.index_id = HC_SP_INDEX_EQ_CONS;
    eq_data.sb_id = DEF_SB_ID;
    storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc));
}
| 10643 |
|
| 10644 |
/*
 * Enable interrupts through the HC (host coalescing) block, setting
 * the config register bits for the active interrupt mode (MSI-X,
 * single MSI-X, MSI, or INTx) and programming the leading/trailing
 * edge attention registers on non-E1 chips.
 */
static void
bxe_hc_int_enable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);
    /* flags describing the negotiated interrupt mode */
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    if (msix) {
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0);
        val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        if (single_msix) {
            val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
        }
    } else if (msi) {
        val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        /* INTx */
        val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                HC_CONFIG_0_REG_INT_LINE_EN_0 |
                HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        if (!CHIP_IS_E1(sc)) {
            /*
             * On non-E1, first write the value with MSI/MSIX enable set,
             * then strip that bit so the final write below leaves only
             * INTx enabled.
             */
            BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n",
                  val, port, addr);

            REG_WR(sc, addr, val);

            val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }
    }

    if (CHIP_IS_E1(sc)) {
        /* E1 additionally needs the HC interrupt mask opened */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
          val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, addr, val);

    /* ensure that HC_CONFIG is written before leading/trailing edge config */
    mb();

    if (!CHIP_IS_E1(sc)) {
        /* init leading/trailing edge */
        if (IS_MF(sc)) {
            val = (0xee0f | (1 << (SC_VN(sc) + 4)));
            if (sc->port.pmf) {
                /* enable nig and gpio3 attention */
                val |= 0x1100;
            }
        } else {
            val = 0xffff;
        }

        REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val);
        REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val);
    }

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}
| 10715 |
|
| 10716 |
/*
 * Enable interrupts through the IGU block, configuring the PF for the
 * active interrupt mode (MSI-X, single MSI-X, MSI, or INTx), acking
 * any stale status, and programming the leading/trailing edge latches.
 */
static void
bxe_igu_int_enable(struct bxe_softc *sc)
{
    uint32_t val;
    /* flags describing the negotiated interrupt mode */
    uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
    uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
                           (sc->intr_count == 1)) ? TRUE : FALSE;
    uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;

    val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    if (msix) {
        val &= ~(IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_SINGLE_ISR_EN);
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN);
        if (single_msix) {
            val |= IGU_PF_CONF_SINGLE_ISR_EN;
        }
    } else if (msi) {
        val &= ~IGU_PF_CONF_INT_LINE_EN;
        val |= (IGU_PF_CONF_MSI_MSIX_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    } else {
        /* INTx */
        val &= ~IGU_PF_CONF_MSI_MSIX_EN;
        val |= (IGU_PF_CONF_INT_LINE_EN |
                IGU_PF_CONF_ATTN_BIT_EN |
                IGU_PF_CONF_SINGLE_ISR_EN);
    }

    /* clean previous status - need to configure igu prior to ack */
    if ((!msix) || single_msix) {
        REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
        bxe_ack_int(sc);
    }

    val |= IGU_PF_CONF_FUNC_EN;

    BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n",
          val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);

    mb();

    /* init leading/trailing edge */
    if (IS_MF(sc)) {
        val = (0xee0f | (1 << (SC_VN(sc) + 4)));
        if (sc->port.pmf) {
            /* enable nig and gpio3 attention */
            val |= 0x1100;
        }
    } else {
        val = 0xffff;
    }

    REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val);
    REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val);

    /* make sure that interrupts are indeed enabled from here on */
    mb();
}
| 10779 |
|
| 10780 |
static void |
| 10781 |
bxe_int_enable(struct bxe_softc *sc) |
| 10782 |
{ |
| 10783 |
if (sc->devinfo.int_block == INT_BLOCK_HC) { |
| 10784 |
bxe_hc_int_enable(sc); |
| 10785 |
} else { |
| 10786 |
bxe_igu_int_enable(sc); |
| 10787 |
} |
| 10788 |
} |
| 10789 |
|
| 10790 |
/*
 * Disable host-coalescer (HC) interrupt delivery for this port: clear the
 * interrupt enable bits in the per-port HC config register and verify the
 * write with a read-back.
 */
static void
bxe_hc_int_disable(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
    uint32_t val = REG_RD(sc, addr);

    /*
     * In E1 we must use only PCI configuration space to disable MSI/MSIX
     * capability. It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC
     * block
     */
    if (CHIP_IS_E1(sc)) {
        /*
         * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register
         * to prevent from HC sending interrupts after we exit the function
         */
        REG_WR(sc, (HC_REG_INT_MASK + port*4), 0);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    } else {
        /* non-E1 chips may also clear the MSI/MSI-X enable in the HC block */
        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
    }

    BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, addr, val);
    /* read back to verify the disable actually reached the chip */
    if (REG_RD(sc, addr) != val) {
        BLOGE(sc, "proper val not read from HC IGU!\n");
    }
}
| 10829 |
|
| 10830 |
/*
 * Disable IGU interrupt delivery for this PF: clear the MSI/MSI-X,
 * INTx and attention enable bits and verify the write with a read-back.
 */
static void
bxe_igu_int_disable(struct bxe_softc *sc)
{
    uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION);

    val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
             IGU_PF_CONF_INT_LINE_EN |
             IGU_PF_CONF_ATTN_BIT_EN);

    BLOGD(sc, DBG_INTR, "write %x to IGU\n", val);

    /* flush all outstanding writes */
    mb();

    REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
    /* read back to verify the disable actually reached the chip */
    if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
        BLOGE(sc, "proper val not read from IGU!\n");
    }
}
| 10849 |
|
| 10850 |
static void |
| 10851 |
bxe_int_disable(struct bxe_softc *sc) |
| 10852 |
{ |
| 10853 |
if (sc->devinfo.int_block == INT_BLOCK_HC) { |
| 10854 |
bxe_hc_int_disable(sc); |
| 10855 |
} else { |
| 10856 |
bxe_igu_int_disable(sc); |
| 10857 |
} |
| 10858 |
} |
| 10859 |
|
| 10860 |
/*
 * Bring up the NIC data path: initialize every fastpath queue, the rx/tx
 * rings, the default/slowpath status blocks and rings, and finally enable
 * interrupts.  'load_code' is the MCP load response and is forwarded to
 * bxe_init_internal() to drive how much internal memory is initialized.
 */
static void
bxe_nic_init(struct bxe_softc *sc,
             int load_code)
{
    int i;

    for (i = 0; i < sc->num_queues; i++) {
        bxe_init_eth_fp(sc, i);
    }

    rmb(); /* ensure status block indices were read */

    bxe_init_rx_rings(sc);
    bxe_init_tx_rings(sc);

    /* a VF is done here; the rest of the setup is PF-only */
    if (IS_VF(sc)) {
        return;
    }

    /* initialize MOD_ABS interrupts */
    elink_init_mod_abs_int(sc, &sc->link_vars,
                           sc->devinfo.chip_id,
                           sc->devinfo.shmem_base,
                           sc->devinfo.shmem2_base,
                           SC_PORT(sc));

    /* default status block, slowpath and event rings, then PF/stats state */
    bxe_init_def_sb(sc);
    bxe_update_dsb_idx(sc);
    bxe_init_sp_ring(sc);
    bxe_init_eq_ring(sc);
    bxe_init_internal(sc, load_code);
    bxe_pf_init(sc);
    bxe_stats_init(sc);

    /* flush all before enabling interrupts */
    mb();

    bxe_int_enable(sc);

    /* check for SPIO5 */
    bxe_attn_int_deasserted0(sc,
                             REG_RD(sc,
                                    (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
                                     SC_PORT(sc)*4)) &
                             AEU_INPUTS_ATTN_BITS_SPIO5);
}
| 10906 |
|
| 10907 |
/*
 * Initialize the ecore slowpath objects owned by this PF: the rx-mode
 * object, the multicast object, the MAC and VLAN CAM credit pools, and
 * the RSS configuration object.
 */
static inline void
bxe_init_objs(struct bxe_softc *sc)
{
    /* mcast rules must be added to tx if tx switching is enabled */
    ecore_obj_type o_type =
        (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
                                         ECORE_OBJ_TYPE_RX;

    /* RX_MODE controlling object */
    ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);

    /* multicast configuration controlling object (anchored on queue 0) */
    ecore_init_mcast_obj(sc,
                         &sc->mcast_obj,
                         sc->fp[0].cl_id,
                         sc->fp[0].index,
                         SC_FUNC(sc),
                         SC_FUNC(sc),
                         BXE_SP(sc, mcast_rdata),
                         BXE_SP_MAPPING(sc, mcast_rdata),
                         ECORE_FILTER_MCAST_PENDING,
                         &sc->sp_state,
                         o_type);

    /* Setup CAM credit pools (credits shared per-port on E1x, per-path after) */
    ecore_init_mac_credit_pool(sc,
                               &sc->macs_pool,
                               SC_FUNC(sc),
                               CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                 VNICS_PER_PATH(sc));

    ecore_init_vlan_credit_pool(sc,
                                &sc->vlans_pool,
                                SC_ABS_FUNC(sc) >> 1,
                                CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) :
                                                  VNICS_PER_PATH(sc));

    /* RSS configuration object (anchored on queue 0) */
    ecore_init_rss_config_obj(sc,
                              &sc->rss_conf_obj,
                              sc->fp[0].cl_id,
                              sc->fp[0].index,
                              SC_FUNC(sc),
                              SC_FUNC(sc),
                              BXE_SP(sc, rss_rdata),
                              BXE_SP_MAPPING(sc, rss_rdata),
                              ECORE_FILTER_RSS_CONF_PENDING,
                              &sc->sp_state, ECORE_OBJ_TYPE_RX);
}
| 10956 |
|
| 10957 |
/* |
| 10958 |
* Initialize the function. This must be called before sending CLIENT_SETUP |
| 10959 |
* for the first client. |
| 10960 |
*/ |
| 10961 |
static inline int |
| 10962 |
bxe_func_start(struct bxe_softc *sc) |
| 10963 |
{ |
| 10964 |
struct ecore_func_state_params func_params = { NULL }; |
| 10965 |
struct ecore_func_start_params *start_params = &func_params.params.start; |
| 10966 |
|
| 10967 |
/* Prepare parameters for function state transitions */ |
| 10968 |
bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); |
| 10969 |
|
| 10970 |
func_params.f_obj = &sc->func_obj; |
| 10971 |
func_params.cmd = ECORE_F_CMD_START; |
| 10972 |
|
| 10973 |
/* Function parameters */ |
| 10974 |
start_params->mf_mode = sc->devinfo.mf_info.mf_mode; |
| 10975 |
start_params->sd_vlan_tag = OVLAN(sc); |
| 10976 |
|
| 10977 |
if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { |
| 10978 |
start_params->network_cos_mode = STATIC_COS; |
| 10979 |
} else { /* CHIP_IS_E1X */ |
| 10980 |
start_params->network_cos_mode = FW_WRR; |
| 10981 |
} |
| 10982 |
|
| 10983 |
start_params->gre_tunnel_mode = 0; |
| 10984 |
start_params->gre_tunnel_rss = 0; |
| 10985 |
|
| 10986 |
return (ecore_func_state_change(sc, &func_params)); |
| 10987 |
} |
| 10988 |
|
| 10989 |
static int |
| 10990 |
bxe_set_power_state(struct bxe_softc *sc, |
| 10991 |
uint8_t state) |
| 10992 |
{ |
| 10993 |
uint16_t pmcsr; |
| 10994 |
|
| 10995 |
/* If there is no power capability, silently succeed */ |
| 10996 |
if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { |
| 10997 |
BLOGW(sc, "No power capability\n"); |
| 10998 |
return (0); |
| 10999 |
} |
| 11000 |
|
| 11001 |
pmcsr = pci_read_config(sc->dev, |
| 11002 |
(sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), |
| 11003 |
2); |
| 11004 |
|
| 11005 |
switch (state) { |
| 11006 |
case PCI_PM_D0: |
| 11007 |
pci_write_config(sc->dev, |
| 11008 |
(sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), |
| 11009 |
((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); |
| 11010 |
|
| 11011 |
if (pmcsr & PCIM_PSTAT_DMASK) { |
| 11012 |
/* delay required during transition out of D3hot */ |
| 11013 |
DELAY(20000); |
| 11014 |
} |
| 11015 |
|
| 11016 |
break; |
| 11017 |
|
| 11018 |
case PCI_PM_D3hot: |
| 11019 |
/* XXX if there are other clients above don't shut down the power */ |
| 11020 |
|
| 11021 |
/* don't shut down the power for emulation and FPGA */ |
| 11022 |
if (CHIP_REV_IS_SLOW(sc)) { |
| 11023 |
return (0); |
| 11024 |
} |
| 11025 |
|
| 11026 |
pmcsr &= ~PCIM_PSTAT_DMASK; |
| 11027 |
pmcsr |= PCIM_PSTAT_D3; |
| 11028 |
|
| 11029 |
if (sc->wol) { |
| 11030 |
pmcsr |= PCIM_PSTAT_PMEENABLE; |
| 11031 |
} |
| 11032 |
|
| 11033 |
pci_write_config(sc->dev, |
| 11034 |
(sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), |
| 11035 |
pmcsr, 4); |
| 11036 |
|
| 11037 |
/* |
| 11038 |
* No more memory access after this point until device is brought back |
| 11039 |
* to D0 state. |
| 11040 |
*/ |
| 11041 |
break; |
| 11042 |
|
| 11043 |
default: |
| 11044 |
BLOGE(sc, "Can't support PCI power state = %d\n", state); |
| 11045 |
return (-1); |
| 11046 |
} |
| 11047 |
|
| 11048 |
return (0); |
| 11049 |
} |
| 11050 |
|
| 11051 |
|
| 11052 |
/* return true if succeeded to acquire the lock */ |
| 11053 |
/*
 * Non-blocking attempt to acquire a hardware resource lock for this
 * function.  Returns TRUE if the lock was acquired, FALSE otherwise
 * (invalid resource id or lock held by another function).
 */
static uint8_t
bxe_trylock_hw_lock(struct bxe_softc *sc,
                    uint32_t resource)
{
    uint32_t lock_status;
    uint32_t resource_bit = (1 << resource);
    int func = SC_FUNC(sc);
    uint32_t hw_lock_control_reg;

    BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource);

    /* Validating that the resource is within range */
    if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
        BLOGD(sc, DBG_LOAD,
              "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
              resource, HW_LOCK_MAX_RESOURCE_VALUE);
        return (FALSE);
    }

    /* functions 0-5 use DRIVER_CONTROL_1..6, functions 6-7 use 7..8 */
    if (func <= 5) {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
    } else {
        hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
    }

    /* try to acquire the lock: write the bit, then read back the status */
    REG_WR(sc, hw_lock_control_reg + 4, resource_bit);
    lock_status = REG_RD(sc, hw_lock_control_reg);
    if (lock_status & resource_bit) {
        return (TRUE);
    }

    BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource);

    return (FALSE);
}
| 11089 |
|
| 11090 |
/*
 * Get the recovery leader resource id according to the engine this function
 * belongs to. Currently only 2 engines are supported.
 */
| 11094 |
static int |
| 11095 |
bxe_get_leader_lock_resource(struct bxe_softc *sc) |
| 11096 |
{ |
| 11097 |
if (SC_PATH(sc)) { |
| 11098 |
return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); |
| 11099 |
} else { |
| 11100 |
return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); |
| 11101 |
} |
| 11102 |
} |
| 11103 |
|
| 11104 |
/* try to acquire a leader lock for current engine */ |
| 11105 |
static uint8_t |
| 11106 |
bxe_trylock_leader_lock(struct bxe_softc *sc) |
| 11107 |
{ |
| 11108 |
return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); |
| 11109 |
} |
| 11110 |
|
| 11111 |
static int
bxe_release_leader_lock(struct bxe_softc *sc)
{
    /* drop the engine-specific recovery-leader hardware lock */
    int leader_resource = bxe_get_leader_lock_resource(sc);

    return (bxe_release_hw_lock(sc, leader_resource));
}
| 11116 |
|
| 11117 |
/* close gates #2, #3 and #4 */ |
| 11118 |
/*
 * Close (close != 0) or open the "gates" #2, #3 and #4 that isolate the
 * chip from host doorbells/writes and interrupts during error recovery.
 */
static void
bxe_set_234_gates(struct bxe_softc *sc,
                  uint8_t close)
{
    uint32_t val;

    /* gates #2 and #4a are closed/opened for "not E1" only */
    if (!CHIP_IS_E1(sc)) {
        /* #4 */
        REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
        /* #2 */
        REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
    }

    /* #3 */
    if (CHIP_IS_E1x(sc)) {
        /* prevent interrupts from HC on both ports */
        /*
         * NOTE(review): BLOCK_DISABLE is set on open and cleared on close
         * here (and in the IGU branch below); this matches the inherited
         * code verbatim — confirm polarity against the register spec.
         */
        val = REG_RD(sc, HC_REG_CONFIG_1);
        REG_WR(sc, HC_REG_CONFIG_1,
               (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
                          (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1));

        val = REG_RD(sc, HC_REG_CONFIG_0);
        REG_WR(sc, HC_REG_CONFIG_0,
               (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                          (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
    } else {
        /* Prevent incoming interrupts in IGU */
        val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);

        REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION,
               (!close) ?
               (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
               (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
    }

    BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n",
          close ? "closing" : "opening");

    wmb();
}
| 11159 |
|
| 11160 |
/* poll for pending writes bit, it should get cleared in no more than 1s */ |
| 11161 |
static int |
| 11162 |
bxe_er_poll_igu_vq(struct bxe_softc *sc) |
| 11163 |
{ |
| 11164 |
uint32_t cnt = 1000; |
| 11165 |
uint32_t pend_bits = 0; |
| 11166 |
|
| 11167 |
do { |
| 11168 |
pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); |
| 11169 |
|
| 11170 |
if (pend_bits == 0) { |
| 11171 |
break; |
| 11172 |
} |
| 11173 |
|
| 11174 |
DELAY(1000); |
| 11175 |
} while (--cnt > 0); |
| 11176 |
|
| 11177 |
if (cnt == 0) { |
| 11178 |
BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); |
| 11179 |
return (-1); |
| 11180 |
} |
| 11181 |
|
| 11182 |
return (0); |
| 11183 |
} |
| 11184 |
|
| 11185 |
#define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ |
| 11186 |
|
| 11187 |
static void |
| 11188 |
bxe_clp_reset_prep(struct bxe_softc *sc, |
| 11189 |
uint32_t *magic_val) |
| 11190 |
{ |
| 11191 |
/* Do some magic... */ |
| 11192 |
uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); |
| 11193 |
*magic_val = val & SHARED_MF_CLP_MAGIC; |
| 11194 |
MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); |
| 11195 |
} |
| 11196 |
|
| 11197 |
/* restore the value of the 'magic' bit */ |
| 11198 |
static void |
| 11199 |
bxe_clp_reset_done(struct bxe_softc *sc, |
| 11200 |
uint32_t magic_val) |
| 11201 |
{ |
| 11202 |
/* Restore the 'magic' bit value... */ |
| 11203 |
uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); |
| 11204 |
MFCFG_WR(sc, shared_mf_config.clp_mb, |
| 11205 |
(val & (~SHARED_MF_CLP_MAGIC)) | magic_val); |
| 11206 |
} |
| 11207 |
|
| 11208 |
/* prepare for MCP reset, takes care of CLP configurations */ |
| 11209 |
/*
 * Prepare the MCP for reset: save/set the CLP 'magic' bit (non-E1) and
 * invalidate this port's shmem validity map so the MCP re-publishes it.
 */
static void
bxe_reset_mcp_prep(struct bxe_softc *sc,
                   uint32_t *magic_val)
{
    uint32_t shmem;
    uint32_t validity_offset;

    /* set `magic' bit in order to save MF config */
    if (!CHIP_IS_E1(sc)) {
        bxe_clp_reset_prep(sc, magic_val);
    }

    /* get shmem offset */
    shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR);
    validity_offset =
        offsetof(struct shmem_region, validity_map[SC_PORT(sc)]);

    /* Clear validity map flags (only if the shmem base is published) */
    if (shmem > 0) {
        REG_WR(sc, shmem + validity_offset, 0);
    }
}
| 11231 |
|
| 11232 |
#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ |
| 11233 |
#define MCP_ONE_TIMEOUT 100 /* 100 ms */ |
| 11234 |
|
| 11235 |
static void |
| 11236 |
bxe_mcp_wait_one(struct bxe_softc *sc) |
| 11237 |
{ |
| 11238 |
/* special handling for emulation and FPGA (10 times longer) */ |
| 11239 |
if (CHIP_REV_IS_SLOW(sc)) { |
| 11240 |
DELAY((MCP_ONE_TIMEOUT*10) * 1000); |
| 11241 |
} else { |
| 11242 |
DELAY((MCP_ONE_TIMEOUT) * 1000); |
| 11243 |
} |
| 11244 |
} |
| 11245 |
|
| 11246 |
/* initialize shmem_base and waits for validity signature to appear */ |
| 11247 |
static int |
| 11248 |
bxe_init_shmem(struct bxe_softc *sc) |
| 11249 |
{ |
| 11250 |
int cnt = 0; |
| 11251 |
uint32_t val = 0; |
| 11252 |
|
| 11253 |
do { |
| 11254 |
sc->devinfo.shmem_base = |
| 11255 |
sc->link_params.shmem_base = |
| 11256 |
REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); |
| 11257 |
|
| 11258 |
if (sc->devinfo.shmem_base) { |
| 11259 |
val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); |
| 11260 |
if (val & SHR_MEM_VALIDITY_MB) |
| 11261 |
return (0); |
| 11262 |
} |
| 11263 |
|
| 11264 |
bxe_mcp_wait_one(sc); |
| 11265 |
|
| 11266 |
} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); |
| 11267 |
|
| 11268 |
BLOGE(sc, "BAD MCP validity signature\n"); |
| 11269 |
|
| 11270 |
return (-1); |
| 11271 |
} |
| 11272 |
|
| 11273 |
static int |
| 11274 |
bxe_reset_mcp_comp(struct bxe_softc *sc, |
| 11275 |
uint32_t magic_val) |
| 11276 |
{ |
| 11277 |
int rc = bxe_init_shmem(sc); |
| 11278 |
|
| 11279 |
/* Restore the `magic' bit value */ |
| 11280 |
if (!CHIP_IS_E1(sc)) { |
| 11281 |
bxe_clp_reset_done(sc, magic_val); |
| 11282 |
} |
| 11283 |
|
| 11284 |
return (rc); |
| 11285 |
} |
| 11286 |
|
| 11287 |
static void |
| 11288 |
bxe_pxp_prep(struct bxe_softc *sc) |
| 11289 |
{ |
| 11290 |
if (!CHIP_IS_E1(sc)) { |
| 11291 |
REG_WR(sc, PXP2_REG_RD_START_INIT, 0); |
| 11292 |
REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); |
| 11293 |
wmb(); |
| 11294 |
} |
| 11295 |
} |
| 11296 |
|
| 11297 |
/* |
| 11298 |
* Reset the whole chip except for: |
| 11299 |
* - PCIE core |
| 11300 |
* - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) |
| 11301 |
* - IGU |
| 11302 |
* - MISC (including AEU) |
| 11303 |
* - GRC |
| 11304 |
* - RBCN, RBCP |
| 11305 |
*/ |
| 11306 |
/*
 * Assert/deassert the chip reset registers for the "process kill"
 * recovery.  'global' selects whether the chip-common (MCP) blocks are
 * reset as well.  The ordering of the register writes below is
 * load-bearing — see the long comment before the first REG_WR.
 */
static void
bxe_process_kill_chip_reset(struct bxe_softc *sc,
                            uint8_t global)
{
    uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
    uint32_t global_bits2, stay_reset2;

    /*
     * Bits that have to be set in reset_mask2 if we want to reset 'global'
     * (per chip) blocks.
     */
    global_bits2 =
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;

    /*
     * Don't reset the following blocks.
     * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
     *            reset, as in 4 port device they might still be owned
     *            by the MCP (there is only one leader per path).
     */
    not_reset_mask1 =
        MISC_REGISTERS_RESET_REG_1_RST_HC |
        MISC_REGISTERS_RESET_REG_1_RST_PXPV |
        MISC_REGISTERS_RESET_REG_1_RST_PXP;

    not_reset_mask2 =
        MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_RBCN |
        MISC_REGISTERS_RESET_REG_2_RST_GRC |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
        MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
        MISC_REGISTERS_RESET_REG_2_RST_ATC |
        MISC_REGISTERS_RESET_REG_2_PGLC |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
        MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
        MISC_REGISTERS_RESET_REG_2_UMAC0 |
        MISC_REGISTERS_RESET_REG_2_UMAC1;

    /*
     * Keep the following blocks in reset:
     *  - all xxMACs are handled by the elink code.
     */
    stay_reset2 =
        MISC_REGISTERS_RESET_REG_2_XMAC |
        MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;

    /* Full reset masks according to the chip */
    reset_mask1 = 0xffffffff;

    if (CHIP_IS_E1(sc))
        reset_mask2 = 0xffff;
    else if (CHIP_IS_E1H(sc))
        reset_mask2 = 0x1ffff;
    else if (CHIP_IS_E2(sc))
        reset_mask2 = 0xfffff;
    else /* CHIP_IS_E3 */
        reset_mask2 = 0x3ffffff;

    /* Don't reset global blocks unless we need to */
    if (!global)
        reset_mask2 &= ~global_bits2;

    /*
     * In case of attention in the QM, we need to reset PXP
     * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
     * because otherwise QM reset would release 'close the gates' shortly
     * before resetting the PXP, then the PSWRQ would send a write
     * request to PGLUE. Then when PXP is reset, PGLUE would try to
     * read the payload data from PSWWR, but PSWWR would not
     * respond. The write queue in PGLUE would stuck, dmae commands
     * would not return. Therefore it's important to reset the second
     * reset register (containing the
     * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
     * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
     * bit).
     */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
           reset_mask2 & (~not_reset_mask2));

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           reset_mask1 & (~not_reset_mask1));

    mb();
    wmb();

    /* bring the non-sticky register-2 blocks back out of reset */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
           reset_mask2 & (~stay_reset2));

    mb();
    wmb();

    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
    wmb();
}
| 11406 |
|
| 11407 |
/*
 * "Process kill" error recovery: drain the chip's internal buffers,
 * close the host gates, hard-reset the chip (optionally including the
 * global/MCP blocks when 'global' is set) and bring it back.
 * Returns 0 on success, -1 on any failure.
 */
static int
bxe_process_kill(struct bxe_softc *sc,
                 uint8_t global)
{
    int cnt = 1000;
    uint32_t val = 0;
    uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
    uint32_t tags_63_32 = 0;

    /* Empty the Tetris buffer, wait for 1s */
    do {
        sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT);
        blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT);
        port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0);
        port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1);
        pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2);
        if (CHIP_IS_E3(sc)) {
            tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32);
        }

        /* the magic values below indicate "buffer empty / all idle" */
        if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
            ((port_is_idle_0 & 0x1) == 0x1) &&
            ((port_is_idle_1 & 0x1) == 0x1) &&
            (pgl_exp_rom2 == 0xffffffff) &&
            (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff)))
            break;
        DELAY(1000);
    } while (cnt-- > 0);

    if (cnt <= 0) {
        BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there "
                  "are still outstanding read requests after 1s! "
                  "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
                  "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
              sr_cnt, blk_cnt, port_is_idle_0,
              port_is_idle_1, pgl_exp_rom2);
        return (-1);
    }

    mb();

    /* Close gates #2, #3 and #4 */
    bxe_set_234_gates(sc, TRUE);

    /* Poll for IGU VQs for 57712 and newer chips */
    if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) {
        return (-1);
    }

    /* XXX indicate that "process kill" is in progress to MCP */

    /* clear "unprepared" bit */
    REG_WR(sc, MISC_REG_UNPREPARED, 0);
    mb();

    /* Make sure all is written to the chip before the reset */
    wmb();

    /*
     * Wait for 1ms to empty GLUE and PCI-E core queues,
     * PSWHST, GRC and PSWRD Tetris buffer.
     */
    DELAY(1000);

    /* Prepare to chip reset: */
    /* MCP */
    if (global) {
        /* 'val' receives the saved CLP 'magic' bit for restore below */
        bxe_reset_mcp_prep(sc, &val);
    }

    /* PXP */
    bxe_pxp_prep(sc);
    mb();

    /* reset the chip */
    bxe_process_kill_chip_reset(sc, global);
    mb();

    /* Recover after reset: */
    /* MCP */
    if (global && bxe_reset_mcp_comp(sc, val)) {
        return (-1);
    }

    /* XXX add resetting the NO_MCP mode DB here */

    /* Open the gates #2, #3 and #4 */
    bxe_set_234_gates(sc, FALSE);

    /*
     * XXX
     * IGU/AEU preparation bring back the AEU/IGU to a reset state
     * re-enable attentions
     */

    return (0);
}
| 11503 |
|
| 11504 |
/*
 * Perform the error-recovery reset as the recovery leader of this engine.
 * When the MCP is present and a global reset is not needed, a "fake"
 * driver instance is loaded via the MCP so the hardware has an owner
 * during the reset, and unloaded afterwards.  On exit the function always
 * steps down as leader and releases the leader lock.
 * Returns 0 on success, -1 on failure.
 */
static int
bxe_leader_reset(struct bxe_softc *sc)
{
    int rc = 0;
    uint8_t global = bxe_reset_is_global(sc);
    uint32_t load_code;

    /*
     * If not going to reset MCP, load "fake" driver to reset HW while
     * driver is owner of the HW.
     */
    if (!global && !BXE_NOMCP(sc)) {
        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
                                   DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset;
        }

        /* only a COMMON load response is acceptable for the fake driver */
        if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
            (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
            BLOGE(sc, "MCP unexpected response, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }

        load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
        if (!load_code) {
            BLOGE(sc, "MCP response failure, aborting\n");
            rc = -1;
            goto exit_leader_reset2;
        }
    }

    /* try to recover after the failure */
    if (bxe_process_kill(sc, global)) {
        BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc));
        rc = -1;
        goto exit_leader_reset2;
    }

    /*
     * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
     * state.
     */
    bxe_set_reset_done(sc);
    if (global) {
        bxe_clear_reset_global(sc);
    }

exit_leader_reset2:

    /* unload "fake driver" if it was loaded */
    if (!global && !BXE_NOMCP(sc)) {
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

exit_leader_reset:

    /* step down as leader regardless of the outcome */
    sc->is_leader = 0;
    bxe_release_leader_lock(sc);

    mb();
    return (rc);
}
| 11571 |
|
| 11572 |
/* |
| 11573 |
* prepare INIT transition, parameters configured: |
| 11574 |
* - HC configuration |
| 11575 |
* - Queue's CDU context |
| 11576 |
*/ |
| 11577 |
static void
bxe_pf_q_prep_init(struct bxe_softc *sc,
                   struct bxe_fastpath *fp,
                   struct ecore_queue_init_params *init_params)
{
    uint8_t cos;
    int cxt_index, cxt_offset;

    /* enable host coalescing on both the rx and tx sides of the queue */
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);

    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
    bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);

    /* HC rate: interrupts/sec derived from the tick period (usec); 0 = off */
    init_params->rx.hc_rate =
        sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
    init_params->tx.hc_rate =
        sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;

    /* FW SB ID */
    init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;

    /* CQ index among the SB indices */
    init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
    init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;

    /* set maximum number of COSs supported by this queue */
    init_params->max_cos = sc->max_cos;

    BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n",
          fp->index, init_params->max_cos);

    /* set the context pointers queue object */
    for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
        /* XXX change index/cid here if ever support multiple tx CoS */
        /* fp->txdata[cos]->cid */
        cxt_index = fp->index / ILT_PAGE_CIDS;
        cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
        init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
    }
}
| 11619 |
|
| 11620 |
/* set flags that are common for the Tx-only and not normal connections */ |
| 11621 |
static unsigned long |
| 11622 |
bxe_get_common_flags(struct bxe_softc *sc, |
| 11623 |
struct bxe_fastpath *fp, |
| 11624 |
uint8_t zero_stats) |
| 11625 |
{ |
| 11626 |
unsigned long flags = 0; |
| 11627 |
|
| 11628 |
/* PF driver will always initialize the Queue to an ACTIVE state */ |
| 11629 |
bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); |
| 11630 |
|
| 11631 |
/* |
| 11632 |
* tx only connections collect statistics (on the same index as the |
| 11633 |
* parent connection). The statistics are zeroed when the parent |
| 11634 |
* connection is initialized. |
| 11635 |
*/ |
| 11636 |
|
| 11637 |
bxe_set_bit(ECORE_Q_FLG_STATS, &flags); |
| 11638 |
if (zero_stats) { |
| 11639 |
bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); |
| 11640 |
} |
| 11641 |
|
| 11642 |
/* |
| 11643 |
* tx only connections can support tx-switching, though their |
| 11644 |
* CoS-ness doesn't survive the loopback |
| 11645 |
*/ |
| 11646 |
if (sc->flags & BXE_TX_SWITCHING) { |
| 11647 |
bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); |
| 11648 |
} |
| 11649 |
|
| 11650 |
bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); |
| 11651 |
|
| 11652 |
return (flags); |
| 11653 |
} |
| 11654 |
|
| 11655 |
/*
 * Build the ecore queue flags for a normal (not tx-only) connection.
 * 'leading' marks the leading RSS queue, which also owns multicast.
 * The result includes the common flags from bxe_get_common_flags().
 */
static unsigned long
bxe_get_q_flags(struct bxe_softc *sc,
                struct bxe_fastpath *fp,
                uint8_t leading)
{
    unsigned long flags = 0;

    /* in switch-dependent MF mode, enable outer-VLAN handling */
    if (IS_MF_SD(sc)) {
        bxe_set_bit(ECORE_Q_FLG_OV, &flags);
    }

    /* LRO enabled on the interface turns on TPA aggregation */
    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
        bxe_set_bit(ECORE_Q_FLG_TPA, &flags);
        bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags);
#if 0
        if (fp->mode == TPA_MODE_GRO)
            __set_bit(ECORE_Q_FLG_TPA_GRO, &flags);
#endif
    }

    if (leading) {
        bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags);
        bxe_set_bit(ECORE_Q_FLG_MCAST, &flags);
    }

    bxe_set_bit(ECORE_Q_FLG_VLAN, &flags);

#if 0
    /* configure silent vlan removal */
    if (IS_MF_AFEX(sc)) {
        bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags);
    }
#endif

    /* merge with common flags */
    return (flags | bxe_get_common_flags(sc, fp, TRUE));
}
| 11692 |
|
| 11693 |
static void |
| 11694 |
bxe_pf_q_prep_general(struct bxe_softc *sc, |
| 11695 |
struct bxe_fastpath *fp, |
| 11696 |
struct ecore_general_setup_params *gen_init, |
| 11697 |
uint8_t cos) |
| 11698 |
{ |
| 11699 |
gen_init->stat_id = bxe_stats_id(fp); |
| 11700 |
gen_init->spcl_id = fp->cl_id; |
| 11701 |
gen_init->mtu = sc->mtu; |
| 11702 |
gen_init->cos = cos; |
| 11703 |
} |
| 11704 |
|
| 11705 |
/*
 * Prepare the RX-queue portion of the SETUP ramrod: pause (flow control)
 * thresholds, TPA/SGE aggregation parameters (when LRO is enabled), and
 * the DMA addresses / sizes of the rx descriptor, SGE and RCQ rings.
 */
static void
bxe_pf_rx_q_prep(struct bxe_softc              *sc,
                 struct bxe_fastpath           *fp,
                 struct rxq_pause_params       *pause,
                 struct ecore_rxq_setup_params *rxq_init)
{
    uint8_t max_sge = 0;
    uint16_t sge_sz = 0;
    uint16_t tpa_agg_size = 0;

    /* TPA/SGE parameters only matter when LRO is enabled */
    if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
        pause->sge_th_lo = SGE_TH_LO(sc);
        pause->sge_th_hi = SGE_TH_HI(sc);

        /* validate SGE ring has enough to cross high threshold */
        if (sc->dropless_fc &&
            (pause->sge_th_hi + FW_PREFETCH_CNT) >
            (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) {
            BLOGW(sc, "sge ring threshold limit\n");
        }

        /* minimum max_aggregation_size is 2*MTU (two full buffers) */
        tpa_agg_size = (2 * sc->mtu);
        if (tpa_agg_size < sc->max_aggregation_size) {
            tpa_agg_size = sc->max_aggregation_size;
        }

        /* max SGEs needed per packet, rounded up to whole SGE pages */
        max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
        max_sge = ((max_sge + PAGES_PER_SGE - 1) &
                   (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
        sge_sz = (uint16_t)min(SGE_PAGES, 0xffff);
    }

    /* pause - not for e1 */
    if (!CHIP_IS_E1(sc)) {
        pause->bd_th_lo = BD_TH_LO(sc);
        pause->bd_th_hi = BD_TH_HI(sc);

        pause->rcq_th_lo = RCQ_TH_LO(sc);
        pause->rcq_th_hi = RCQ_TH_HI(sc);

        /* validate rings have enough entries to cross high thresholds */
        if (sc->dropless_fc &&
            pause->bd_th_hi + FW_PREFETCH_CNT >
            sc->rx_ring_size) {
            BLOGW(sc, "rx bd ring threshold limit\n");
        }

        if (sc->dropless_fc &&
            pause->rcq_th_hi + FW_PREFETCH_CNT >
            RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) {
            BLOGW(sc, "rcq ring threshold limit\n");
        }

        pause->pri_map = 1;
    }

    /* rxq setup: ring DMA addresses */
    rxq_init->dscr_map = fp->rx_dma.paddr;
    rxq_init->sge_map = fp->rx_sge_dma.paddr;
    rxq_init->rcq_map = fp->rcq_dma.paddr;
    /* next-page portion of the RCQ starts one page into the ring */
    rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);

    /*
     * This should be a maximum number of data bytes that may be
     * placed on the BD (not including paddings).
     */
    rxq_init->buf_sz = (fp->rx_buf_size -
                        IP_HEADER_ALIGNMENT_PADDING);

    rxq_init->cl_qzone_id = fp->cl_qzone_id;
    rxq_init->tpa_agg_sz = tpa_agg_size;
    rxq_init->sge_buf_sz = sge_sz;
    rxq_init->max_sges_pkt = max_sge;
    rxq_init->rss_engine_id = SC_FUNC(sc);
    rxq_init->mcast_engine_id = SC_FUNC(sc);

    /*
     * Maximum number or simultaneous TPA aggregation for this Queue.
     * For PF Clients it should be the maximum available number.
     * VF driver(s) may want to define it to a smaller value.
     */
    rxq_init->max_tpa_queues = MAX_AGG_QS(sc);

    rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
    rxq_init->fw_sb_id = fp->fw_sb_id;

    rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;

    /*
     * configure silent vlan removal
     * if multi function mode is afex, then mask default vlan
     */
    if (IS_MF_AFEX(sc)) {
        rxq_init->silent_removal_value =
            sc->devinfo.mf_info.afex_def_vlan_tag;
        rxq_init->silent_removal_mask = EVL_VLID_MASK;
    }
}
| 11804 |
|
| 11805 |
static void |
| 11806 |
bxe_pf_tx_q_prep(struct bxe_softc *sc, |
| 11807 |
struct bxe_fastpath *fp, |
| 11808 |
struct ecore_txq_setup_params *txq_init, |
| 11809 |
uint8_t cos) |
| 11810 |
{ |
| 11811 |
/* |
| 11812 |
* XXX If multiple CoS is ever supported then each fastpath structure |
| 11813 |
* will need to maintain tx producer/consumer/dma/etc values *per* CoS. |
| 11814 |
* fp->txdata[cos]->tx_dma.paddr; |
| 11815 |
*/ |
| 11816 |
txq_init->dscr_map = fp->tx_dma.paddr; |
| 11817 |
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; |
| 11818 |
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; |
| 11819 |
txq_init->fw_sb_id = fp->fw_sb_id; |
| 11820 |
|
| 11821 |
/* |
| 11822 |
* set the TSS leading client id for TX classfication to the |
| 11823 |
* leading RSS client id |
| 11824 |
*/ |
| 11825 |
txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); |
| 11826 |
} |
| 11827 |
|
| 11828 |
/*
 * This function performs 2 steps in a queue state machine:
 *   1) RESET->INIT
 *   2) INIT->SETUP
 *
 * When 'leading' is set the queue is configured as the leading RSS queue.
 * Returns 0 on success or the ecore error code of the failed transition.
 */
static int
bxe_setup_queue(struct bxe_softc *sc,
                struct bxe_fastpath *fp,
                uint8_t leading)
{
    struct ecore_queue_state_params q_params = { NULL };
    struct ecore_queue_setup_params *setup_params =
        &q_params.params.setup;
#if 0
    struct ecore_queue_setup_tx_only_params *tx_only_params =
        &q_params.params.tx_only;
    uint8_t tx_index;
#endif
    int rc;

    BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);

    /* enable interrupts on this queue's status block */
    bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);

    q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj;

    /* we want to wait for completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

    /* prepare the INIT parameters */
    bxe_pf_q_prep_init(sc, fp, &q_params.params.init);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_INIT;

    /* Change the state to INIT */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) INIT failed\n", fp->index);
        return (rc);
    }

    BLOGD(sc, DBG_LOAD, "init complete\n");

    /* now move the Queue to the SETUP state */
    memset(setup_params, 0, sizeof(*setup_params));

    /* set Queue flags */
    setup_params->flags = bxe_get_q_flags(sc, fp, leading);

    /* set general SETUP parameters */
    bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
                          FIRST_TX_COS_INDEX);

    bxe_pf_rx_q_prep(sc, fp,
                     &setup_params->pause_params,
                     &setup_params->rxq_params);

    bxe_pf_tx_q_prep(sc, fp,
                     &setup_params->txq_params,
                     FIRST_TX_COS_INDEX);

    /* Set the command */
    q_params.cmd = ECORE_Q_CMD_SETUP;

    /* change the state to SETUP */
    rc = ecore_queue_state_change(sc, &q_params);
    if (rc) {
        BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index);
        return (rc);
    }

#if 0
    /* loop through the relevant tx-only indices */
    for (tx_index = FIRST_TX_ONLY_COS_INDEX;
         tx_index < sc->max_cos;
         tx_index++) {
        /* prepare and send tx-only ramrod*/
        rc = bxe_setup_tx_only(sc, fp, &q_params,
                               tx_only_params, tx_index, leading);
        if (rc) {
            BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n",
                  fp->index, tx_index);
            return (rc);
        }
    }
#endif

    return (rc);
}
| 11918 |
|
| 11919 |
static int |
| 11920 |
bxe_setup_leading(struct bxe_softc *sc) |
| 11921 |
{ |
| 11922 |
return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); |
| 11923 |
} |
| 11924 |
|
| 11925 |
/*
 * Send the RSS configuration ramrod for the PF: regular RSS mode over
 * IPv4/IPv6 TCP (and UDP when enabled on the object), program the
 * indirection table, and optionally (config_hash) install fresh random
 * hash keys.  Waits for the ramrod to complete.
 */
static int
bxe_config_rss_pf(struct bxe_softc *sc,
                  struct ecore_rss_config_obj *rss_obj,
                  uint8_t config_hash)
{
    struct ecore_config_rss_params params = { NULL };
    int i;

    /*
     * Although RSS is meaningless when there is a single HW queue we
     * still need it enabled in order to have HW Rx hash generated.
     */

    params.rss_obj = rss_obj;

    /* wait for the ramrod completion in this context */
    bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

    bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags);

    /* RSS configuration */
    bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags);
    bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags);
    if (rss_obj->udp_rss_v4) {
        bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags);
    }
    if (rss_obj->udp_rss_v6) {
        bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags);
    }

    /* Hash bits */
    params.rss_result_mask = MULTI_MASK;

    memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

    if (config_hash) {
        /* RSS keys (filled 32 bits at a time) */
        for (i = 0; i < sizeof(params.rss_key) / 4; i++) {
            params.rss_key[i] = arc4random();
        }

        bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags);
    }

    return (ecore_config_rss(sc, &params));
}
| 11972 |
|
| 11973 |
static int |
| 11974 |
bxe_config_rss_eth(struct bxe_softc *sc, |
| 11975 |
uint8_t config_hash) |
| 11976 |
{ |
| 11977 |
return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); |
| 11978 |
} |
| 11979 |
|
| 11980 |
/*
 * Initialize the PF RSS state: build a default round-robin indirection
 * table over the ethernet queues, propagate the UDP-RSS setting, and
 * push the configuration to the chip via bxe_config_rss_eth().
 */
static int
bxe_init_rss_pf(struct bxe_softc *sc)
{
    uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc);
    int i;

    /*
     * Prepare the initial contents of the indirection table if
     * RSS is enabled
     */
    for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
        sc->rss_conf_obj.ind_table[i] =
            (sc->fp->cl_id + (i % num_eth_queues));
    }

    if (sc->udp_rss) {
        sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
    }

    /*
     * For 57710 and 57711 SEARCHER configuration (rss_keys) is
     * per-port, so if explicit configuration is needed, do it only
     * for a PMF.
     *
     * For 57712 and newer it's a per-function configuration.
     */
    return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
}
| 12008 |
|
| 12009 |
/*
 * Configure (add when 'set' is non-zero, else delete) a single MAC
 * address on the given vlan_mac object.  If RAMROD_CONT is set in
 * *ramrod_flags the user-request section is skipped and only pending
 * commands are continued.
 *
 * Returns 0 on success (adding an already-configured MAC is not treated
 * as an error) or a negative ecore error code.
 */
static int
bxe_set_mac_one(struct bxe_softc *sc,
                uint8_t *mac,
                struct ecore_vlan_mac_obj *obj,
                uint8_t set,
                int mac_type,
                unsigned long *ramrod_flags)
{
    struct ecore_vlan_mac_ramrod_params ramrod_param;
    int rc;

    memset(&ramrod_param, 0, sizeof(ramrod_param));

    /* fill in general parameters */
    ramrod_param.vlan_mac_obj = obj;
    ramrod_param.ramrod_flags = *ramrod_flags;

    /* fill a user request section if needed */
    if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) {
        memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

        bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

        /* Set the command: ADD or DEL */
        ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD :
                                            ECORE_VLAN_MAC_DEL;
    }

    rc = ecore_config_vlan_mac(sc, &ramrod_param);

    if (rc == ECORE_EXISTS) {
        BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
        /* do not treat adding same MAC as error */
        rc = 0;
    } else if (rc < 0) {
        BLOGE(sc, "%s MAC failed (%d)\n", (set ? "Set" : "Delete"), rc);
    }

    return (rc);
}
| 12049 |
|
| 12050 |
static int |
| 12051 |
bxe_set_eth_mac(struct bxe_softc *sc, |
| 12052 |
uint8_t set) |
| 12053 |
{ |
| 12054 |
unsigned long ramrod_flags = 0; |
| 12055 |
|
| 12056 |
BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); |
| 12057 |
|
| 12058 |
bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); |
| 12059 |
|
| 12060 |
/* Eth MAC is set on RSS leading client (fp[0]) */ |
| 12061 |
return (bxe_set_mac_one(sc, sc->link_params.mac_addr, |
| 12062 |
&sc->sp_objs->mac_obj, |
| 12063 |
set, ECORE_ETH_MAC, &ramrod_flags)); |
| 12064 |
} |
| 12065 |
|
| 12066 |
#if 0
/*
 * Update the MAX bandwidth field of this function's MF configuration
 * and notify the MCP firmware of the new value.  Currently compiled out.
 */
static void
bxe_update_max_mf_config(struct bxe_softc *sc,
                         uint32_t value)
{
    /* load old values */
    uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)];

    if (value != bxe_extract_max_cfg(sc, mf_cfg)) {
        /* leave all but MAX value */
        mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

        /* set new MAX value */
        mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) &
                   FUNC_MF_CFG_MAX_BW_MASK);

        bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
    }
}
#endif
| 12086 |
|
| 12087 |
/*
 * Return the index of the currently active PHY: the internal PHY when
 * only one PHY exists, otherwise the external PHY chosen from the live
 * link state (when the link is up) or from the hardware PHY-selection
 * configuration (when the link is down).
 */
static int
bxe_get_cur_phy_idx(struct bxe_softc *sc)
{
    uint32_t sel_phy_idx = 0;

    if (sc->link_params.num_phys <= 1) {
        return (ELINK_INT_PHY);
    }

    if (sc->link_vars.link_up) {
        sel_phy_idx = ELINK_EXT_PHY1;
        /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */
        if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
            (sc->link_params.phy[ELINK_EXT_PHY2].supported &
             ELINK_SUPPORTED_FIBRE))
            sel_phy_idx = ELINK_EXT_PHY2;
    } else {
        /* link down: fall back to the configured PHY priority */
        switch (elink_phy_selection(&sc->link_params)) {
        case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
        case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
            sel_phy_idx = ELINK_EXT_PHY1;
            break;
        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
        case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
            sel_phy_idx = ELINK_EXT_PHY2;
            break;
        }
    }

    return (sel_phy_idx);
}
| 12119 |
|
| 12120 |
static int |
| 12121 |
bxe_get_link_cfg_idx(struct bxe_softc *sc) |
| 12122 |
{ |
| 12123 |
uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); |
| 12124 |
|
| 12125 |
/* |
| 12126 |
* The selected activated PHY is always after swapping (in case PHY |
| 12127 |
* swapping is enabled). So when swapping is enabled, we need to reverse |
| 12128 |
* the configuration |
| 12129 |
*/ |
| 12130 |
|
| 12131 |
if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { |
| 12132 |
if (sel_phy_idx == ELINK_EXT_PHY1) |
| 12133 |
sel_phy_idx = ELINK_EXT_PHY2; |
| 12134 |
else if (sel_phy_idx == ELINK_EXT_PHY2) |
| 12135 |
sel_phy_idx = ELINK_EXT_PHY1; |
| 12136 |
} |
| 12137 |
|
| 12138 |
return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); |
| 12139 |
} |
| 12140 |
|
| 12141 |
static void |
| 12142 |
bxe_set_requested_fc(struct bxe_softc *sc) |
| 12143 |
{ |
| 12144 |
/* |
| 12145 |
* Initialize link parameters structure variables |
| 12146 |
* It is recommended to turn off RX FC for jumbo frames |
| 12147 |
* for better performance |
| 12148 |
*/ |
| 12149 |
if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { |
| 12150 |
sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; |
| 12151 |
} else { |
| 12152 |
sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; |
| 12153 |
} |
| 12154 |
} |
| 12155 |
|
| 12156 |
/*
 * Translate the negotiated IEEE pause bits (link_vars.ieee_fc) into the
 * pause advertising bits for the current link-configuration index.
 */
static void
bxe_calc_fc_adv(struct bxe_softc *sc)
{
    uint8_t cfg_idx = bxe_get_link_cfg_idx(sc);
    switch (sc->link_vars.ieee_fc &
            MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
    default:
        /* no pause negotiated: clear both advertising bits */
        sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
                                           ADVERTISED_Pause);
        break;

    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
        sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
        break;

    case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
        sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
        break;
    }
}
| 12178 |
|
| 12179 |
static uint16_t |
| 12180 |
bxe_get_mf_speed(struct bxe_softc *sc) |
| 12181 |
{ |
| 12182 |
uint16_t line_speed = sc->link_vars.line_speed; |
| 12183 |
if (IS_MF(sc)) { |
| 12184 |
uint16_t maxCfg = |
| 12185 |
bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); |
| 12186 |
|
| 12187 |
/* calculate the current MAX line speed limit for the MF devices */ |
| 12188 |
if (IS_MF_SI(sc)) { |
| 12189 |
line_speed = (line_speed * maxCfg) / 100; |
| 12190 |
} else { /* SD mode */ |
| 12191 |
uint16_t vn_max_rate = maxCfg * 100; |
| 12192 |
|
| 12193 |
if (vn_max_rate < line_speed) { |
| 12194 |
line_speed = vn_max_rate; |
| 12195 |
} |
| 12196 |
} |
| 12197 |
} |
| 12198 |
|
| 12199 |
return (line_speed); |
| 12200 |
} |
| 12201 |
|
| 12202 |
/*
 * Snapshot the current link state (effective speed, up/down, duplex,
 * rx/tx flow control) into *data for comparison and reporting by
 * bxe_link_report_locked().
 */
static void
bxe_fill_report_data(struct bxe_softc *sc,
                     struct bxe_link_report_data *data)
{
    uint16_t line_speed = bxe_get_mf_speed(sc);

    memset(data, 0, sizeof(*data));

    /* fill the report data with the effective line speed */
    data->line_speed = line_speed;

    /* Link is down (also reported down when the MF function is disabled) */
    if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
        bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
    }

    /* Full DUPLEX */
    if (sc->link_vars.duplex == DUPLEX_FULL) {
        bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
    }

    /* Rx Flow Control is ON */
    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
        bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
    }

    /* Tx Flow Control is ON */
    if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
        bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
    }
}
| 12233 |
|
| 12234 |
/*
 * Report link status to OS, should be called under phy_lock.
 * Suppresses duplicate reports (same state, or down->down) and logs the
 * new speed/duplex/flow-control settings when the link comes up.
 */
static void
bxe_link_report_locked(struct bxe_softc *sc)
{
    struct bxe_link_report_data cur_data;

    /* reread mf_cfg */
    if (IS_PF(sc) && !CHIP_IS_E1(sc)) {
        bxe_read_mf_cfg(sc);
    }

    /* Read the current link report info */
    bxe_fill_report_data(sc, &cur_data);

    /* Don't report link down or exactly the same link status twice */
    if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
        (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &sc->last_reported_link.link_report_flags) &&
         bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                      &cur_data.link_report_flags))) {
        return;
    }

    sc->link_cnt++;

    /* report new link params and remember the state for the next time */
    memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));

    if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
        if_linkstate_change_drv(sc->ifp, LINK_STATE_DOWN);
        BLOGI(sc, "NIC Link is Down\n");
    } else {
        const char *duplex;
        const char *flow;

        /* note: test_and_clear removes the duplex bit from the local copy */
        if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX,
                                   &cur_data.link_report_flags)) {
            duplex = "full";
        } else {
            duplex = "half";
        }

        /*
         * Handle the FC at the end so that only these flags would be
         * possibly set. This way we may easily check if there is no FC
         * enabled.
         */
        if (cur_data.link_report_flags) {
            if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                             &cur_data.link_report_flags) &&
                bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                             &cur_data.link_report_flags)) {
                flow = "ON - receive & transmit";
            } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                    &cur_data.link_report_flags) &&
                       !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                     &cur_data.link_report_flags)) {
                flow = "ON - receive";
            } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON,
                                     &cur_data.link_report_flags) &&
                       bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON,
                                    &cur_data.link_report_flags)) {
                flow = "ON - transmit";
            } else {
                flow = "none"; /* possible? */
            }
        } else {
            flow = "none";
        }

        if_linkstate_change_drv(sc->ifp, LINK_STATE_UP);
        BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
              cur_data.line_speed, duplex, flow);
    }
}
| 12310 |
|
| 12311 |
/*
 * Report link status to the OS: wrapper that takes the PHY lock around
 * bxe_link_report_locked().
 */
static void
bxe_link_report(struct bxe_softc *sc)
{
    BXE_PHY_LOCK(sc);
    bxe_link_report_locked(sc);
    BXE_PHY_UNLOCK(sc);
}
| 12318 |
|
| 12319 |
/*
 * Refresh the driver's view of the link state.  For a PF on real
 * hardware this queries elink; otherwise (non-PF, or emulation/FPGA —
 * CHIP_REV_IS_SLOW) a fixed link-up state is synthesized.  The result
 * is then propagated to the statistics machinery and reported to the OS.
 */
static void
bxe_link_status_update(struct bxe_softc *sc)
{
    /* nothing to do unless the interface is fully up */
    if (sc->state != BXE_STATE_OPEN) {
        return;
    }

#if 0
    /* read updated dcb configuration */
    if (IS_PF(sc))
        bxe_dcbx_pmf_update(sc);
#endif

    if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) {
        elink_link_status_update(&sc->link_params, &sc->link_vars);
    } else {
        /* synthesize a fixed 10G full-duplex link-up state */
        sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
                                  ELINK_SUPPORTED_10baseT_Full |
                                  ELINK_SUPPORTED_100baseT_Half |
                                  ELINK_SUPPORTED_100baseT_Full |
                                  ELINK_SUPPORTED_1000baseT_Full |
                                  ELINK_SUPPORTED_2500baseX_Full |
                                  ELINK_SUPPORTED_10000baseT_Full |
                                  ELINK_SUPPORTED_TP |
                                  ELINK_SUPPORTED_FIBRE |
                                  ELINK_SUPPORTED_Autoneg |
                                  ELINK_SUPPORTED_Pause |
                                  ELINK_SUPPORTED_Asym_Pause);
        sc->port.advertising[0] = sc->port.supported[0];

        sc->link_params.sc = sc;
        sc->link_params.port = SC_PORT(sc);
        sc->link_params.req_duplex[0] = DUPLEX_FULL;
        sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
        sc->link_params.req_line_speed[0] = SPEED_10000;
        sc->link_params.speed_cap_mask[0] = 0x7f0000;
        sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;

        if (CHIP_REV_IS_FPGA(sc)) {
            sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
            sc->link_vars.line_speed = ELINK_SPEED_1000;
            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
                                         LINK_STATUS_SPEED_AND_DUPLEX_1000TFD);
        } else {
            sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
            sc->link_vars.line_speed = ELINK_SPEED_10000;
            sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
                                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
        }

        sc->link_vars.link_up = 1;

        sc->link_vars.duplex = DUPLEX_FULL;
        sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;

        if (IS_PF(sc)) {
            /* disable the egress drain and report the synthesized link */
            REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
            bxe_link_report(sc);
        }
    }

    if (IS_PF(sc)) {
        if (sc->link_vars.link_up) {
            bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        } else {
            bxe_stats_handle(sc, STATS_EVENT_STOP);
        }
        bxe_link_report(sc);
    } else {
        bxe_link_report(sc);
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
    }
}
| 12393 |
|
| 12394 |
/*
 * Perform the initial PHY bring-up for the given load mode: set the
 * requested flow control, apply emulation feature masks on slow
 * (emulation/FPGA) chips, configure loopback for diagnostic modes,
 * then initialize the PHY via elink under the PHY lock.
 *
 * The requested line speed is saved and restored around the call since
 * LOAD_DIAG may override it.  Returns the elink_phy_init() result.
 */
static int
bxe_initial_phy_init(struct bxe_softc *sc,
                     int load_mode)
{
    int rc, cfg_idx = bxe_get_link_cfg_idx(sc);
    uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
    struct elink_params *lp = &sc->link_params;

    bxe_set_requested_fc(sc);

    /* emulation/FPGA: disable MACs not present on this bond */
    if (CHIP_REV_IS_SLOW(sc)) {
        uint32_t bond = CHIP_BOND_ID(sc);
        uint32_t feat = 0;

        if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
        } else if (bond & 0x4) {
            if (CHIP_IS_E3(sc)) {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
            } else {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
            }
        } else if (bond & 0x8) {
            if (CHIP_IS_E3(sc)) {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
            } else {
                feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
            }
        }

        /* disable EMAC for E3 and above */
        if (bond & 0x2) {
            feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
        }

        sc->link_params.feature_config_flags |= feat;
    }

    BXE_PHY_LOCK(sc);

    if (load_mode == LOAD_DIAG) {
        lp->loopback_mode = ELINK_LOOPBACK_XGXS;
        /* Prefer doing PHY loopback at 10G speed, if possible */
        if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
            if (lp->speed_cap_mask[cfg_idx] &
                PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
            } else {
                lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
            }
        }
    }

    if (load_mode == LOAD_LOOPBACK_EXT) {
        lp->loopback_mode = ELINK_LOOPBACK_EXT;
    }

    rc = elink_phy_init(&sc->link_params, &sc->link_vars);

    BXE_PHY_UNLOCK(sc);

    bxe_calc_fc_adv(sc);

    if (sc->link_vars.link_up) {
        bxe_stats_handle(sc, STATS_EVENT_LINK_UP);
        bxe_link_report(sc);
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        bxe_periodic_start(sc);
    }

    /* restore the requested speed possibly clobbered by LOAD_DIAG */
    sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
    return (rc);
}
| 12469 |
|
| 12470 |
/* must be called under IF_ADDR_LOCK */ |
| 12471 |
static int |
| 12472 |
bxe_init_mcast_macs_list(struct bxe_softc *sc, |
| 12473 |
struct ecore_mcast_ramrod_params *p) |
| 12474 |
{ |
| 12475 |
if_t ifp = sc->ifp; |
| 12476 |
int mc_count = 0; |
| 12477 |
int mcnt, i; |
| 12478 |
struct ecore_mcast_list_elem *mc_mac; |
| 12479 |
unsigned char *mta; |
| 12480 |
|
| 12481 |
mc_count = if_multiaddr_count(ifp, -1);/* XXX they don't have a limit */ |
| 12482 |
/* should we enforce one? */ |
| 12483 |
ECORE_LIST_INIT(&p->mcast_list); |
| 12484 |
p->mcast_list_len = 0; |
| 12485 |
|
| 12486 |
if (!mc_count) { |
| 12487 |
return (0); |
| 12488 |
} |
| 12489 |
|
| 12490 |
mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * |
| 12491 |
mc_count, M_DEVBUF, M_NOWAIT); |
| 12492 |
|
| 12493 |
if(mta == NULL) { |
| 12494 |
BLOGE(sc, "Failed to allocate temp mcast list\n"); |
| 12495 |
return (-1); |
| 12496 |
} |
| 12497 |
|
| 12498 |
mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, |
| 12499 |
(M_NOWAIT | M_ZERO)); |
| 12500 |
if (!mc_mac) { |
| 12501 |
free(mta, M_DEVBUF); |
| 12502 |
BLOGE(sc, "Failed to allocate temp mcast list\n"); |
| 12503 |
return (-1); |
| 12504 |
} |
| 12505 |
|
| 12506 |
if_multiaddr_array(ifp, mta, &mcnt, mc_count); /* mta and mcnt not expected |
| 12507 |
to be different */ |
| 12508 |
for(i=0; i< mcnt; i++) { |
| 12509 |
|
| 12510 |
bcopy((mta + (i * ETHER_ADDR_LEN)), mc_mac->mac, ETHER_ADDR_LEN); |
| 12511 |
ECORE_LIST_PUSH_TAIL(&mc_mac->link, &p->mcast_list); |
| 12512 |
|
| 12513 |
BLOGD(sc, DBG_LOAD, |
| 12514 |
"Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X\n", |
| 12515 |
mc_mac->mac[0], mc_mac->mac[1], mc_mac->mac[2], |
| 12516 |
mc_mac->mac[3], mc_mac->mac[4], mc_mac->mac[5]); |
| 12517 |
|
| 12518 |
mc_mac++; |
| 12519 |
} |
| 12520 |
|
| 12521 |
p->mcast_list_len = mc_count; |
| 12522 |
free(mta, M_DEVBUF); |
| 12523 |
|
| 12524 |
return (0); |
| 12525 |
} |
| 12526 |
|
| 12527 |
/*
 * Free the multicast MAC list built by bxe_init_mcast_macs_list().
 * All elements were allocated as one array, so freeing the first entry
 * releases the whole list.
 */
static void
bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p)
{
    /*
     * NOTE(review): assumes ECORE_LIST_FIRST_ENTRY yields NULL for an
     * empty list -- confirm against the ecore list macros.
     */
    struct ecore_mcast_list_elem *mc_mac =
        ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
                               struct ecore_mcast_list_elem,
                               link);

    if (mc_mac) {
        /* only a single free as all mc_macs are in the same heap array */
        free(mc_mac, M_DEVBUF);
    }
}
| 12540 |
|
| 12541 |
static int |
| 12542 |
bxe_set_mc_list(struct bxe_softc *sc) |
| 12543 |
{ |
| 12544 |
struct ecore_mcast_ramrod_params rparam = { NULL }; |
| 12545 |
int rc = 0; |
| 12546 |
|
| 12547 |
rparam.mcast_obj = &sc->mcast_obj; |
| 12548 |
|
| 12549 |
BXE_MCAST_LOCK(sc); |
| 12550 |
|
| 12551 |
/* first, clear all configured multicast MACs */ |
| 12552 |
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); |
| 12553 |
if (rc < 0) { |
| 12554 |
BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); |
| 12555 |
return (rc); |
| 12556 |
} |
| 12557 |
|
| 12558 |
/* configure a new MACs list */ |
| 12559 |
rc = bxe_init_mcast_macs_list(sc, &rparam); |
| 12560 |
if (rc) { |
| 12561 |
BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc); |
| 12562 |
BXE_MCAST_UNLOCK(sc); |
| 12563 |
return (rc); |
| 12564 |
} |
| 12565 |
|
| 12566 |
/* Now add the new MACs */ |
| 12567 |
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); |
| 12568 |
if (rc < 0) { |
| 12569 |
BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); |
| 12570 |
} |
| 12571 |
|
| 12572 |
bxe_free_mcast_macs_list(&rparam); |
| 12573 |
|
| 12574 |
BXE_MCAST_UNLOCK(sc); |
| 12575 |
|
| 12576 |
return (rc); |
| 12577 |
} |
| 12578 |
|
| 12579 |
/*
 * Sync the device unicast MAC filter with the interface address list:
 * schedule deletion of all previously configured unicast MACs, add each
 * current link-level address, then kick the pending commands with
 * RAMROD_CONT.  Returns 0 on success or a negative error code.
 */
static int
bxe_set_uc_list(struct bxe_softc *sc)
{
    if_t ifp = sc->ifp;
    struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
    struct ifaddr *ifa;
    unsigned long ramrod_flags = 0;
    int rc;

#if __FreeBSD_version < 800000
    IF_ADDR_LOCK(ifp);
#else
    if_addr_rlock_drv(ifp);
#endif

    /* first schedule a cleanup up of old configuration */
    rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE);
    if (rc < 0) {
        BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc);
#if __FreeBSD_version < 800000
        IF_ADDR_UNLOCK(ifp);
#else
        if_addr_runlock_drv(ifp);
#endif
        return (rc);
    }

    ifa = if_getifaddr(ifp); /* XXX Is this structure */
    while (ifa) {
        /* only link-level (MAC) addresses are of interest */
        if (ifa->ifa_addr->sa_family != AF_LINK) {
            ifa = TAILQ_NEXT(ifa, ifa_link);
            continue;
        }

        rc = bxe_set_mac_one(sc, (uint8_t *)LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
                             mac_obj, TRUE, ECORE_UC_LIST_MAC, &ramrod_flags);
        /*
         * NOTE(review): bxe_set_mac_one already maps ECORE_EXISTS to 0,
         * so the -EEXIST comparison below looks redundant or mismatched
         * with the ecore return convention -- confirm.
         */
        if (rc == -EEXIST) {
            BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
            /* do not treat adding same MAC as an error */
            rc = 0;
        } else if (rc < 0) {
            BLOGE(sc, "Failed to schedule ADD operations (%d)\n", rc);
#if __FreeBSD_version < 800000
            IF_ADDR_UNLOCK(ifp);
#else
            if_addr_runlock_drv(ifp);
#endif
            return (rc);
        }

        ifa = TAILQ_NEXT(ifa, ifa_link);
    }

#if __FreeBSD_version < 800000
    IF_ADDR_UNLOCK(ifp);
#else
    if_addr_runlock_drv(ifp);
#endif

    /* Execute the pending commands */
    bit_set(&ramrod_flags, RAMROD_CONT);
    return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */,
                            ECORE_UC_LIST_MAC, &ramrod_flags));
}
| 12643 |
|
| 12644 |
/*
 * Taskqueue handler that recomputes and applies the receive filter mode.
 * Runs under the core lock; bails out silently unless the device is in
 * BXE_STATE_OPEN.  Mode selection: IFF_PROMISC wins, then ALLMULTI
 * (also forced on E1 when the multicast count exceeds BXE_MAX_MULTICAST),
 * otherwise NORMAL with explicit mcast/ucast lists — falling back to
 * ALLMULTI/PROMISC respectively if programming those lists fails.
 */
static void
bxe_handle_rx_mode_tq(void *context,
                      int  pending)
{
    struct bxe_softc *sc = (struct bxe_softc *)context;
    if_t ifp = sc->ifp;
    uint32_t rx_mode = BXE_RX_MODE_NORMAL;

    BXE_CORE_LOCK(sc);

    if (sc->state != BXE_STATE_OPEN) {
        BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
        BXE_CORE_UNLOCK(sc);
        return;
    }

    BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));

    if (if_getflags(ifp) & IFF_PROMISC) {
        rx_mode = BXE_RX_MODE_PROMISC;
    } else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
               ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
                CHIP_IS_E1(sc))) {
        /* E1 cannot filter more than BXE_MAX_MULTICAST addresses */
        rx_mode = BXE_RX_MODE_ALLMULTI;
    } else {
        if (IS_PF(sc)) {
            /* some multicasts */
            if (bxe_set_mc_list(sc) < 0) {
                /* degrade gracefully rather than dropping mcast traffic */
                rx_mode = BXE_RX_MODE_ALLMULTI;
            }
            if (bxe_set_uc_list(sc) < 0) {
                rx_mode = BXE_RX_MODE_PROMISC;
            }
        }
#if 0
        else {
            /*
             * Configuring mcast to a VF involves sleeping (when we
             * wait for the PF's response). Since this function is
             * called from a non sleepable context we must schedule
             * a work item for this purpose
             */
            bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state);
            schedule_delayed_work(&sc->sp_rtnl_task, 0);
        }
#endif
    }

    sc->rx_mode = rx_mode;

    /* schedule the rx_mode command */
    if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
        /* an rx_mode command is already in flight; mark it for later */
        BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n");
        bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
        BXE_CORE_UNLOCK(sc);
        return;
    }

    if (IS_PF(sc)) {
        bxe_set_storm_rx_mode(sc);
    }
#if 0
    else {
        /*
         * Configuring mcast to a VF involves sleeping (when we
         * wait for the PF's response). Since this function is
         * called from a non sleepable context we must schedule
         * a work item for this purpose
         */
        bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state);
        schedule_delayed_work(&sc->sp_rtnl_task, 0);
    }
#endif

    BXE_CORE_UNLOCK(sc);
}
| 12720 |
|
| 12721 |
/*
 * Request a receive-filter update.  Defers the real work to
 * bxe_handle_rx_mode_tq() on the rx_mode taskqueue so callers in
 * non-sleepable contexts never block on the core lock here.
 */
static void
bxe_set_rx_mode(struct bxe_softc *sc)
{
    taskqueue_enqueue(sc->rx_mode_tq, &sc->rx_mode_tq_task);
}
| 12726 |
|
| 12727 |
/* update flags in shmem */ |
| 12728 |
static void |
| 12729 |
bxe_update_drv_flags(struct bxe_softc *sc, |
| 12730 |
uint32_t flags, |
| 12731 |
uint32_t set) |
| 12732 |
{ |
| 12733 |
uint32_t drv_flags; |
| 12734 |
|
| 12735 |
if (SHMEM2_HAS(sc, drv_flags)) { |
| 12736 |
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); |
| 12737 |
drv_flags = SHMEM2_RD(sc, drv_flags); |
| 12738 |
|
| 12739 |
if (set) { |
| 12740 |
SET_FLAGS(drv_flags, flags); |
| 12741 |
} else { |
| 12742 |
RESET_FLAGS(drv_flags, flags); |
| 12743 |
} |
| 12744 |
|
| 12745 |
SHMEM2_WR(sc, drv_flags, drv_flags); |
| 12746 |
BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); |
| 12747 |
|
| 12748 |
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); |
| 12749 |
} |
| 12750 |
} |
| 12751 |
|
| 12752 |
/* periodic timer callout routine, only runs when the interface is up */ |
| 12753 |
|
| 12754 |
/*
 * Periodic (1 Hz) housekeeping callout, armed only while the interface
 * is up: per-queue TX watchdog, PHY periodic servicing (PMF only),
 * driver<->MCP heartbeat pulse exchange, and a stats update.  Uses
 * trylock so it never blocks the callout thread; on contention it simply
 * reschedules itself and tries again next tick.
 */
static void
bxe_periodic_callout_func(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;
    int i;

    if (!BXE_CORE_TRYLOCK(sc)) {
        /* just bail and try again next time */

        if ((sc->state == BXE_STATE_OPEN) &&
            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
            /* schedule the next periodic callout */
            callout_reset(&sc->periodic_callout, hz,
                          bxe_periodic_callout_func, sc);
        }

        return;
    }

    /* state may have changed while we waited for the trylock */
    if ((sc->state != BXE_STATE_OPEN) ||
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
        BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
        BXE_CORE_UNLOCK(sc);
        return;
    }

    /* Check for TX timeouts on any fastpath. */
    FOR_EACH_QUEUE(sc, i) {
        if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
            /* Ruh-Roh, chip was reset! */
            break;
        }
    }

    if (!CHIP_REV_IS_SLOW(sc)) {
        /*
         * This barrier is needed to ensure the ordering between the writing
         * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
         * the reading here.
         */
        mb();
        if (sc->port.pmf) {
            /* only the port-management function services the PHY */
            BXE_PHY_LOCK(sc);
            elink_period_func(&sc->link_params, &sc->link_vars);
            BXE_PHY_UNLOCK(sc);
        }
    }

    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        int mb_idx = SC_FW_MB_IDX(sc);
        uint32_t drv_pulse;
        uint32_t mcp_pulse;

        /* advance our heartbeat sequence number (wraps via the mask) */
        ++sc->fw_drv_pulse_wr_seq;
        sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;

        drv_pulse = sc->fw_drv_pulse_wr_seq;
        bxe_drv_pulse(sc);

        mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) &
                     MCP_PULSE_SEQ_MASK);

        /*
         * The delta between driver pulse and mcp response should
         * be 1 (before mcp response) or 0 (after mcp response).
         */
        if ((drv_pulse != mcp_pulse) &&
            (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
            /* someone lost a heartbeat... */
            BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
                  drv_pulse, mcp_pulse);
        }
    }

    /* state is BXE_STATE_OPEN */
    bxe_stats_handle(sc, STATS_EVENT_UPDATE);

#if 0
    /* sample VF bulletin board for new posts from PF */
    if (IS_VF(sc)) {
        bxe_sample_bulletin(sc);
    }
#endif

    BXE_CORE_UNLOCK(sc);

    /* re-arm only while the interface is still up and not being stopped */
    if ((sc->state == BXE_STATE_OPEN) &&
        (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
        /* schedule the next periodic callout */
        callout_reset(&sc->periodic_callout, hz,
                      bxe_periodic_callout_func, sc);
    }
}
| 12847 |
|
| 12848 |
/*
 * Arm the 1 Hz periodic callout.  The GO flag is published (release
 * semantics) before the callout is scheduled so the handler observes it.
 */
static void
bxe_periodic_start(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
    callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
}
| 12854 |
|
| 12855 |
/*
 * Disarm the periodic callout.  Sets STOP first so a concurrently-running
 * handler won't reschedule itself, then drains (waits for) any in-flight
 * invocation.  May sleep; must not be called from the callout itself.
 */
static void
bxe_periodic_stop(struct bxe_softc *sc)
{
    atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
    callout_drain(&sc->periodic_callout);
}
| 12861 |
|
| 12862 |
/* start the controller */ |
| 12863 |
/*
 * Bring the NIC fully up: allocate fastpath/firmware memory, negotiate
 * the load with the MCP (management firmware), initialize hardware,
 * attach interrupts, configure queues/RSS/MACs/rx-mode, and start the
 * periodic callout.  Called with the core lock held.
 *
 * Error handling uses a label ladder (error3..error0); each label
 * unwinds everything set up after the corresponding earlier label.
 *
 * Returns 0 on success or an errno (ENOMEM/ENXIO) on failure.
 *
 * NOTE(review): load_mode is currently ignored — both the PHY init and
 * the Tx-mode switch below hard-code LOAD_OPEN (see the XXX markers).
 */
static __noinline int
bxe_nic_load(struct bxe_softc *sc,
             int              load_mode)
{
    uint32_t val;
    int load_code = 0;
    int i, rc = 0;

    BXE_CORE_LOCK_ASSERT(sc);

    BLOGD(sc, DBG_LOAD, "Starting NIC load...\n");

    sc->state = BXE_STATE_OPENING_WAITING_LOAD;

    if (IS_PF(sc)) {
        /* must be called before memory allocation and HW init */
        bxe_ilt_set_info(sc);
    }

    sc->last_reported_link_state = LINK_STATE_UNKNOWN;

    bxe_set_fp_rx_buf_size(sc);

    if (bxe_alloc_fp_buffers(sc) != 0) {
        BLOGE(sc, "Failed to allocate fastpath memory\n");
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (bxe_alloc_fw_stats_mem(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENOMEM;
        goto bxe_nic_load_error0;
    }

    if (IS_PF(sc)) {
        /* set pf load just before approaching the MCP */
        bxe_set_pf_load(sc);

        /* if MCP exists send load request and analyze response */
        if (!BXE_NOMCP(sc)) {
            /* attempt to load pf */
            if (bxe_nic_load_request(sc, &load_code) != 0) {
                sc->state = BXE_STATE_CLOSED;
                rc = ENXIO;
                goto bxe_nic_load_error1;
            }

            /* what did the MCP say? */
            if (bxe_nic_load_analyze_req(sc, load_code) != 0) {
                bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
                sc->state = BXE_STATE_CLOSED;
                rc = ENXIO;
                goto bxe_nic_load_error2;
            }
        } else {
            BLOGI(sc, "Device has no MCP!\n");
            load_code = bxe_nic_load_no_mcp(sc);
        }

        /* mark PMF if applicable */
        bxe_nic_load_pmf(sc, load_code);

        /* Init Function state controlling object */
        bxe_init_func_obj(sc);

        /* Initialize HW */
        if (bxe_init_hw(sc, load_code) != 0) {
            BLOGE(sc, "HW init failed\n");
            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            sc->state = BXE_STATE_CLOSED;
            rc = ENXIO;
            goto bxe_nic_load_error2;
        }
    }

    /* attach interrupts */
    if (bxe_interrupt_attach(sc) != 0) {
        sc->state = BXE_STATE_CLOSED;
        rc = ENXIO;
        goto bxe_nic_load_error2;
    }

    bxe_nic_init(sc, load_code);

    /* Init per-function objects */
    if (IS_PF(sc)) {
        bxe_init_objs(sc);
        // XXX bxe_iov_nic_init(sc);

        /* set AFEX default VLAN tag to an invalid value */
        sc->devinfo.mf_info.afex_def_vlan_tag = -1;
        // XXX bxe_nic_load_afex_dcc(sc, load_code);

        sc->state = BXE_STATE_OPENING_WAITING_PORT;
        rc = bxe_func_start(sc);
        if (rc) {
            BLOGE(sc, "Function start failed!\n");
            bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        /* send LOAD_DONE command to MCP */
        if (!BXE_NOMCP(sc)) {
            load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
            if (!load_code) {
                BLOGE(sc, "MCP response failure, aborting\n");
                sc->state = BXE_STATE_ERROR;
                rc = ENXIO;
                goto bxe_nic_load_error3;
            }
        }

        /* bring up the leading (default) queue first, then the rest */
        rc = bxe_setup_leading(sc);
        if (rc) {
            BLOGE(sc, "Setup leading failed!\n");
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }

        FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) {
            rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
            if (rc) {
                BLOGE(sc, "Queue(%d) setup failed\n", i);
                sc->state = BXE_STATE_ERROR;
                goto bxe_nic_load_error3;
            }
        }

        rc = bxe_init_rss_pf(sc);
        if (rc) {
            BLOGE(sc, "PF RSS init failed\n");
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }
    /* XXX VF */
#if 0
    else { /* VF */
        FOR_EACH_ETH_QUEUE(sc, i) {
            rc = bxe_vfpf_setup_q(sc, i);
            if (rc) {
                BLOGE(sc, "Queue(%d) setup failed\n", i);
                sc->state = BXE_STATE_ERROR;
                goto bxe_nic_load_error3;
            }
        }
    }
#endif

    /* now when Clients are configured we are ready to work */
    sc->state = BXE_STATE_OPEN;

    /* Configure a ucast MAC */
    if (IS_PF(sc)) {
        rc = bxe_set_eth_mac(sc, TRUE);
    }
#if 0
    else { /* IS_VF(sc) */
        rc = bxe_vfpf_set_mac(sc);
    }
#endif
    if (rc) {
        BLOGE(sc, "Setting Ethernet MAC failed\n");
        sc->state = BXE_STATE_ERROR;
        goto bxe_nic_load_error3;
    }

#if 0
    if (IS_PF(sc) && sc->pending_max) {
        /* for AFEX */
        bxe_update_max_mf_config(sc, sc->pending_max);
        sc->pending_max = 0;
    }
#endif

    if (sc->port.pmf) {
        rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN);
        if (rc) {
            sc->state = BXE_STATE_ERROR;
            goto bxe_nic_load_error3;
        }
    }

    sc->link_params.feature_config_flags &=
        ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN;

    /* start fast path */

    /* Initialize Rx filter */
    bxe_set_rx_mode(sc);

    /* start the Tx */
    switch (/* XXX load_mode */LOAD_OPEN) {
    case LOAD_NORMAL:
    case LOAD_OPEN:
        break;

    case LOAD_DIAG:
    case LOAD_LOOPBACK_EXT:
        sc->state = BXE_STATE_DIAG;
        break;

    default:
        break;
    }

    if (sc->port.pmf) {
        bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0);
    } else {
        bxe_link_status_update(sc);
    }

    /* start the periodic timer callout */
    bxe_periodic_start(sc);

    if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
        /* mark driver is loaded in shmem2 */
        val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
        SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)],
                  (val |
                   DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                   DRV_FLAGS_CAPABILITIES_LOADED_L2));
    }

    /* wait for all pending SP commands to complete */
    if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) {
        BLOGE(sc, "Timeout waiting for all SPs to complete!\n");
        /* full unload instead of the label ladder: device is mostly up */
        bxe_periodic_stop(sc);
        bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE);
        return (ENXIO);
    }

#if 0
    /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
    if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) {
        bxe_dcbx_init(sc, FALSE);
    }
#endif

    /* Tell the stack the driver is running! */
    if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);

    BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n");

    return (0);

bxe_nic_load_error3:

    if (IS_PF(sc)) {
        bxe_int_disable_sync(sc, 1);

        /* clean out queued objects */
        bxe_squeeze_objects(sc);
    }

    bxe_interrupt_detach(sc);

bxe_nic_load_error2:

    if (IS_PF(sc) && !BXE_NOMCP(sc)) {
        /* tell the MCP we are unloading */
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
        bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0);
    }

    sc->port.pmf = 0;

bxe_nic_load_error1:

    /* clear pf_load status, as it was already set */
    if (IS_PF(sc)) {
        bxe_clear_pf_load(sc);
    }

bxe_nic_load_error0:

    bxe_free_fw_stats_mem(sc);
    bxe_free_fp_buffers(sc);
    bxe_free_mem(sc);

    return (rc);
}
| 13153 |
|
| 13154 |
/*
 * Core-locked interface init path.  No-op if already running; otherwise
 * powers the device to D0, runs the parity/attention recovery flow if a
 * previous unload left the chip in a bad state (PF only), then performs
 * the full NIC load.  On failure the stack is told the driver is down.
 *
 * Returns 0 on success or an errno.
 */
static int
bxe_init_locked(struct bxe_softc *sc)
{
    int other_engine = SC_PATH(sc) ? 0 : 1;
    uint8_t other_load_status, load_status;
    uint8_t global = FALSE;
    int rc;

    BXE_CORE_LOCK_ASSERT(sc);

    /* check if the driver is already running */
    if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
        BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n");
        return (0);
    }

    bxe_set_power_state(sc, PCI_PM_D0);

    /*
     * If parity occurred during the unload, then attentions and/or
     * RECOVERY_IN_PROGRES may still be set. If so we want the first function
     * loaded on the current engine to complete the recovery. Parity recovery
     * is only relevant for PF driver.
     */
    if (IS_PF(sc)) {
        other_load_status = bxe_get_load_status(sc, other_engine);
        load_status = bxe_get_load_status(sc, SC_PATH(sc));

        if (!bxe_reset_is_done(sc, SC_PATH(sc)) ||
            bxe_chk_parity_attn(sc, &global, TRUE)) {
            /* do { } while (0) so recovery success can 'break' out */
            do {
                /*
                 * If there are attentions and they are in global blocks, set
                 * the GLOBAL_RESET bit regardless whether it will be this
                 * function that will complete the recovery or not.
                 */
                if (global) {
                    bxe_set_reset_global(sc);
                }

                /*
                 * Only the first function on the current engine should try
                 * to recover in open. In case of attentions in global blocks
                 * only the first in the chip should try to recover.
                 */
                if ((!load_status && (!global || !other_load_status)) &&
                    bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) {
                    BLOGI(sc, "Recovered during init\n");
                    break;
                }

                /* recovery has failed... */
                bxe_set_power_state(sc, PCI_PM_D3hot);
                sc->recovery_state = BXE_RECOVERY_FAILED;

                BLOGE(sc, "Recovery flow hasn't properly "
                          "completed yet, try again later. "
                          "If you still see this message after a "
                          "few retries then power cycle is required.\n");

                rc = ENXIO;
                goto bxe_init_locked_done;
            } while (0);
        }
    }

    sc->recovery_state = BXE_RECOVERY_DONE;

    rc = bxe_nic_load(sc, LOAD_OPEN);

bxe_init_locked_done:

    if (rc) {
        /* Tell the stack the driver is NOT running! */
        BLOGE(sc, "Initialization failed, "
                  "stack notified driver is NOT running!\n");
        if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
    }

    return (rc);
}
| 13235 |
|
| 13236 |
/*
 * Core-locked interface stop path: performs a normal NIC unload with
 * link-down (the TRUE argument) and returns its errno result.
 */
static int
bxe_stop_locked(struct bxe_softc *sc)
{
    BXE_CORE_LOCK_ASSERT(sc);
    return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE));
}
| 13242 |
|
| 13243 |
/* |
| 13244 |
* Handles controller initialization when called from an unlocked routine. |
| 13245 |
* ifconfig calls this function. |
| 13246 |
* |
| 13247 |
* Returns: |
| 13248 |
* void |
| 13249 |
*/ |
| 13250 |
/*
 * if_init entry point (registered via if_setinitfn): acquire the core
 * lock and run the locked init path.  The void* signature is dictated
 * by the ifnet init callback contract.
 */
static void
bxe_init(void *xsc)
{
    struct bxe_softc *sc = (struct bxe_softc *)xsc;

    BXE_CORE_LOCK(sc);
    bxe_init_locked(sc);
    BXE_CORE_UNLOCK(sc);
}
| 13259 |
|
| 13260 |
/*
 * Create and register the network interface: initialize ifmedia,
 * allocate the ifnet, wire up all driver entry points and capability
 * flags (version-dependent), and attach to the Ethernet layer.
 *
 * Returns 0 on success or ENXIO if the ifnet cannot be allocated.
 */
static int
bxe_init_ifnet(struct bxe_softc *sc)
{
    if_t ifp;
    int capabilities;

    /* ifconfig entrypoint for media type/status reporting */
    ifmedia_init(&sc->ifmedia, IFM_IMASK,
                 bxe_ifmedia_update,
                 bxe_ifmedia_status);

    /* set the default interface values */
    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
    ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
    ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));

    sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */

    /* allocate the ifnet structure */
    if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
        BLOGE(sc, "Interface allocation failed!\n");
        return (ENXIO);
    }

    if_setsoftc(ifp, sc);
    if_initname_drv(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
    if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
    if_setioctlfn(ifp, bxe_ioctl);
    if_setstartfn(ifp, bxe_tx_start);
#if __FreeBSD_version >= 800000
    /* multiqueue transmit entry points (8.0+) */
    if_settransmitfn(ifp, bxe_tx_mq_start);
    if_setqflushfn(ifp, bxe_mq_flush);
#endif
#ifdef FreeBSD8_0
    if_settimer(ifp, 0);
#endif
    if_setinitfn(ifp, bxe_init);
    if_setmtu(ifp, sc->mtu);
    /* checksum/TSO work the hardware can take over from the stack */
    if_sethwassist(ifp, (CSUM_IP |
                         CSUM_TCP |
                         CSUM_UDP |
                         CSUM_TSO |
                         CSUM_TCP_IPV6 |
                         CSUM_UDP_IPV6));

    capabilities =
#if __FreeBSD_version < 700000
        (IFCAP_VLAN_MTU |
         IFCAP_VLAN_HWTAGGING |
         IFCAP_HWCSUM |
         IFCAP_JUMBO_MTU |
         IFCAP_LRO);
#else
        (IFCAP_VLAN_MTU |
         IFCAP_VLAN_HWTAGGING |
         IFCAP_VLAN_HWTSO |
         IFCAP_VLAN_HWFILTER |
         IFCAP_VLAN_HWCSUM |
         IFCAP_HWCSUM |
         IFCAP_JUMBO_MTU |
         IFCAP_LRO |
         IFCAP_TSO4 |
         IFCAP_TSO6 |
         IFCAP_WOL_MAGIC);
#endif
    if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */
    if_setbaudrate(ifp, IF_Gbps(10));
    /* XXX */
    if_setsendqlen(ifp, sc->tx_ring_size);
    if_setsendqready(ifp);
    /* XXX */

    sc->ifp = ifp;

    /* attach to the Ethernet interface list */
    ether_ifattach_drv(ifp, sc->link_params.mac_addr);

    return (0);
}
| 13339 |
|
| 13340 |
static void |
| 13341 |
bxe_deallocate_bars(struct bxe_softc *sc) |
| 13342 |
{ |
| 13343 |
int i; |
| 13344 |
|
| 13345 |
for (i = 0; i < MAX_BARS; i++) { |
| 13346 |
if (sc->bar[i].resource != NULL) { |
| 13347 |
bus_release_resource(sc->dev, |
| 13348 |
SYS_RES_MEMORY, |
| 13349 |
sc->bar[i].rid, |
| 13350 |
sc->bar[i].resource); |
| 13351 |
BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", |
| 13352 |
i, PCIR_BAR(i)); |
| 13353 |
} |
| 13354 |
} |
| 13355 |
} |
| 13356 |
|
| 13357 |
/*
 * Map the device's PCI memory BARs (0, 2 and 4) and record the bus tag,
 * handle and KVA for each.  BAR0 is allocated shareable.
 *
 * NOTE(review): when a BAR allocation fails this returns 0 (success)
 * after mapping only the earlier BARs — the disabled log suggests this
 * is deliberate because BAR4 doesn't exist on E1 chips; confirm callers
 * tolerate partially-populated sc->bar[].
 */
static int
bxe_allocate_bars(struct bxe_softc *sc)
{
    u_int flags;
    int i;

    memset(sc->bar, 0, sizeof(sc->bar));

    for (i = 0; i < MAX_BARS; i++) {

        /* memory resources reside at BARs 0, 2, 4 */
        /* Run `pciconf -lb` to see mappings */
        if ((i != 0) && (i != 2) && (i != 4)) {
            continue;
        }

        sc->bar[i].rid = PCIR_BAR(i);

        flags = RF_ACTIVE;
        if (i == 0) {
            /* BAR0 may be shared with other drivers/handlers */
            flags |= RF_SHAREABLE;
        }

        if ((sc->bar[i].resource =
             bus_alloc_resource_any(sc->dev,
                                    SYS_RES_MEMORY,
                                    &sc->bar[i].rid,
                                    flags)) == NULL) {
#if 0
            /* BAR4 doesn't exist for E1 */
            BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n",
                  i, PCIR_BAR(i));
#endif
            return (0);
        }

        /* cache bus-space accessors and the kernel virtual mapping */
        sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
        sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
        sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);

        BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %p-%p (%ld) -> %p\n",
              i, PCIR_BAR(i),
              (void *)rman_get_start(sc->bar[i].resource),
              (void *)rman_get_end(sc->bar[i].resource),
              rman_get_size(sc->bar[i].resource),
              (void *)sc->bar[i].kva);
    }

    return (0);
}
| 13407 |
|
| 13408 |
static void |
| 13409 |
bxe_get_function_num(struct bxe_softc *sc) |
| 13410 |
{ |
| 13411 |
uint32_t val = 0; |
| 13412 |
|
| 13413 |
/* |
| 13414 |
* Read the ME register to get the function number. The ME register |
| 13415 |
* holds the relative-function number and absolute-function number. The |
| 13416 |
* absolute-function number appears only in E2 and above. Before that |
| 13417 |
* these bits always contained zero, therefore we cannot blindly use them. |
| 13418 |
*/ |
| 13419 |
|
| 13420 |
val = REG_RD(sc, BAR_ME_REGISTER); |
| 13421 |
|
| 13422 |
sc->pfunc_rel = |
| 13423 |
(uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); |
| 13424 |
sc->path_id = |
| 13425 |
(uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; |
| 13426 |
|
| 13427 |
if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { |
| 13428 |
sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); |
| 13429 |
} else { |
| 13430 |
sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); |
| 13431 |
} |
| 13432 |
|
| 13433 |
BLOGD(sc, DBG_LOAD, |
| 13434 |
"Relative function %d, Absolute function %d, Path %d\n", |
| 13435 |
sc->pfunc_rel, sc->pfunc_abs, sc->path_id); |
| 13436 |
} |
| 13437 |
|
| 13438 |
static uint32_t |
| 13439 |
bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) |
| 13440 |
{ |
| 13441 |
uint32_t shmem2_size; |
| 13442 |
uint32_t offset; |
| 13443 |
uint32_t mf_cfg_offset_value; |
| 13444 |
|
| 13445 |
/* Non 57712 */ |
| 13446 |
offset = (SHMEM_RD(sc, func_mb) + |
| 13447 |
(MAX_FUNC_NUM * sizeof(struct drv_func_mb))); |
| 13448 |
|
| 13449 |
/* 57712 plus */ |
| 13450 |
if (sc->devinfo.shmem2_base != 0) { |
| 13451 |
shmem2_size = SHMEM2_RD(sc, size); |
| 13452 |
if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { |
| 13453 |
mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); |
| 13454 |
if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { |
| 13455 |
offset = mf_cfg_offset_value; |
| 13456 |
} |
| 13457 |
} |
| 13458 |
} |
| 13459 |
|
| 13460 |
return (offset); |
| 13461 |
} |
| 13462 |
|
| 13463 |
static uint32_t |
| 13464 |
bxe_pcie_capability_read(struct bxe_softc *sc, |
| 13465 |
int reg, |
| 13466 |
int width) |
| 13467 |
{ |
| 13468 |
int pcie_reg; |
| 13469 |
|
| 13470 |
/* ensure PCIe capability is enabled */ |
| 13471 |
if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { |
| 13472 |
if (pcie_reg != 0) { |
| 13473 |
BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); |
| 13474 |
return (pci_read_config(sc->dev, (pcie_reg + reg), width)); |
| 13475 |
} |
| 13476 |
} |
| 13477 |
|
| 13478 |
BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); |
| 13479 |
|
| 13480 |
return (0); |
| 13481 |
} |
| 13482 |
|
| 13483 |
/*
 * Return non-zero while the device reports an outstanding (pending)
 * PCIe transaction in its Device Status register — used to wait for
 * quiescence before reset.
 */
static uint8_t
bxe_is_pcie_pending(struct bxe_softc *sc)
{
    return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) &
            PCIM_EXP_STA_TRANSACTION_PND);
}
| 13489 |
|
| 13490 |
/* |
| 13491 |
* Walk the PCI capabiites list for the device to find what features are |
| 13492 |
* supported. These capabilites may be enabled/disabled by firmware so it's |
| 13493 |
* best to walk the list rather than make assumptions. |
| 13494 |
*/ |
| 13495 |
static void |
| 13496 |
bxe_probe_pci_caps(struct bxe_softc *sc) |
| 13497 |
{ |
| 13498 |
uint16_t link_status; |
| 13499 |
int reg; |
| 13500 |
|
| 13501 |
/* check if PCI Power Management is enabled */ |
| 13502 |
if (pci_find_cap(sc->dev, PCIY_PMG, ®) == 0) { |
| 13503 |
if (reg != 0) { |
| 13504 |
BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); |
| 13505 |
|
| 13506 |
sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; |
| 13507 |
sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; |
| 13508 |
} |
| 13509 |
} |
| 13510 |
|
| 13511 |
link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); |
| 13512 |
|
| 13513 |
/* handle PCIe 2.0 workarounds for 57710 */ |
| 13514 |
if (CHIP_IS_E1(sc)) { |
| 13515 |
/* workaround for 57710 errata E4_57710_27462 */ |
| 13516 |
sc->devinfo.pcie_link_speed = |
| 13517 |
(REG_RD(sc, 0x3d04) & (1 << 24)) ? 2 : 1; |
| 13518 |
|
| 13519 |
/* workaround for 57710 errata E4_57710_27488 */ |
| 13520 |
sc->devinfo.pcie_link_width = |
| 13521 |
((link_status & PCIM_LINK_STA_WIDTH) >> 4); |
| 13522 |
if (sc->devinfo.pcie_link_speed > 1) { |
| 13523 |
sc->devinfo.pcie_link_width = |
| 13524 |
((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; |
| 13525 |
} |
| 13526 |
} else { |
| 13527 |
sc->devinfo.pcie_link_speed = |
| 13528 |
(link_status & PCIM_LINK_STA_SPEED); |
| 13529 |
sc->devinfo.pcie_link_width = |
| 13530 |
((link_status & PCIM_LINK_STA_WIDTH) >> 4); |
| 13531 |
} |
| 13532 |
|
| 13533 |
BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", |
| 13534 |
sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); |
| 13535 |
|
| 13536 |
sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; |
| 13537 |
sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; |
| 13538 |
|
| 13539 |
/* check if MSI capability is enabled */ |
| 13540 |
if (pci_find_cap(sc->dev, PCIY_MSI, ®) == 0) { |
| 13541 |
if (reg != 0) { |
| 13542 |
BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); |
| 13543 |
|
| 13544 |
sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; |
| 13545 |
sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; |
| 13546 |
} |
| 13547 |
} |
| 13548 |
|
| 13549 |
/* check if MSI-X capability is enabled */ |
| 13550 |
if (pci_find_cap(sc->dev, PCIY_MSIX, ®) == 0) { |
| 13551 |
if (reg != 0) { |
| 13552 |
BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); |
| 13553 |
|
| 13554 |
sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; |
| 13555 |
sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; |
| 13556 |
} |
| 13557 |
} |
| 13558 |
} |
| 13559 |
|
| 13560 |
/*
 * Read the multi-function configuration for switch-dependent (SD) mode
 * from the shmem MF config region. Fills in ext_id (the outer VLAN tag),
 * the supported protocol set, and vnics_per_port in sc->devinfo.mf_info.
 * Returns 0 on success, 1 if the outer VLAN read from shmem is invalid.
 */
static int
bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val;

    /* get the outer vlan if we're in switch-dependent mode */

    val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    mf_info->ext_id = (uint16_t)val;

    /* SD mode always implies multiple vnics per port */
    mf_info->multi_vnics_mode = 1;

    if (!VALID_OVLAN(mf_info->ext_id)) {
        BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
        return (1);
    }

    /* get the capabilities (iSCSI-only, FCoE-only, else plain ethernet) */
    if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
        FUNC_MF_CFG_PROTOCOL_ISCSI) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
    } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
               FUNC_MF_CFG_PROTOCOL_FCOE) {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
    } else {
        mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
    }

    /* 2 vnics per port in 4-port mode, otherwise 4 */
    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}
| 13594 |
|
| 13595 |
static uint32_t |
| 13596 |
bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) |
| 13597 |
{ |
| 13598 |
uint32_t retval = 0; |
| 13599 |
uint32_t val; |
| 13600 |
|
| 13601 |
val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); |
| 13602 |
|
| 13603 |
if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { |
| 13604 |
if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { |
| 13605 |
retval |= MF_PROTO_SUPPORT_ETHERNET; |
| 13606 |
} |
| 13607 |
if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { |
| 13608 |
retval |= MF_PROTO_SUPPORT_ISCSI; |
| 13609 |
} |
| 13610 |
if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { |
| 13611 |
retval |= MF_PROTO_SUPPORT_FCOE; |
| 13612 |
} |
| 13613 |
} |
| 13614 |
|
| 13615 |
return (retval); |
| 13616 |
} |
| 13617 |
|
| 13618 |
static int |
| 13619 |
bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) |
| 13620 |
{ |
| 13621 |
struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; |
| 13622 |
uint32_t val; |
| 13623 |
|
| 13624 |
/* |
| 13625 |
* There is no outer vlan if we're in switch-independent mode. |
| 13626 |
* If the mac is valid then assume multi-function. |
| 13627 |
*/ |
| 13628 |
|
| 13629 |
val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); |
| 13630 |
|
| 13631 |
mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); |
| 13632 |
|
| 13633 |
mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); |
| 13634 |
|
| 13635 |
mf_info->vnics_per_port = |
| 13636 |
(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; |
| 13637 |
|
| 13638 |
return (0); |
| 13639 |
} |
| 13640 |
|
| 13641 |
/*
 * Read the multi-function configuration for NIV/AFEX mode from the
 * shmem MF config region: vif id, default VLAN, allowed CoS priorities,
 * default CoS, VLAN mode and MBA enable, plus the supported protocol
 * set and vnics_per_port. Always returns 0.
 */
static int
bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t e1hov_tag;
    uint32_t func_config;
    uint32_t niv_config;

    /* AFEX always implies multiple vnics per port */
    mf_info->multi_vnics_mode = 1;

    e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);
    func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);
    niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config);

    /* in AFEX mode the e1hov word carries the vif id */
    mf_info->ext_id =
        (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >>
                   FUNC_MF_CFG_E1HOV_TAG_SHIFT);

    mf_info->default_vlan =
        (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >>
                   FUNC_MF_CFG_AFEX_VLAN_SHIFT);

    mf_info->niv_allowed_priorities =
        (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
                  FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT);

    mf_info->niv_default_cos =
        (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
                  FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT);

    mf_info->afex_vlan_mode =
        ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
         FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT);

    mf_info->niv_mba_enabled =
        ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >>
         FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT);

    mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);

    /* 2 vnics per port in 4-port mode, otherwise 4 */
    mf_info->vnics_per_port =
        (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4;

    return (0);
}
| 13686 |
|
| 13687 |
/*
 * Sanity-check the multi-function configuration gathered from shmem.
 * Verifies that this function is not hidden, that vnics_per_port agrees
 * with multi_vnics_mode, and (for switch-dependent mode) that all
 * functions on the port agree on MF vs SF and use distinct, valid outer
 * VLANs. Returns 0 if the configuration is consistent, 1 otherwise.
 */
static int
bxe_check_valid_mf_cfg(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t mf_cfg1;
    uint32_t mf_cfg2;
    uint32_t ovlan1;
    uint32_t ovlan2;
    uint8_t i, j;

    /*
     * NOTE(review): these messages print SC_PORT(sc) while the values
     * are per-function -- presumably the function id was intended;
     * confirm against the other BLOGx call sites before changing.
     */
    BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n",
          SC_PORT(sc));
    BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n",
          mf_info->mf_config[SC_VN(sc)]);
    BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n",
          mf_info->multi_vnics_mode);
    BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n",
          mf_info->vnics_per_port);
    BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n",
          mf_info->ext_id);
    BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n",
          mf_info->min_bw[0], mf_info->min_bw[1],
          mf_info->min_bw[2], mf_info->min_bw[3]);
    BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n",
          mf_info->max_bw[0], mf_info->max_bw[1],
          mf_info->max_bw[2], mf_info->max_bw[3]);
    BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n",
          sc->mac_addr_str);

    /* various MF mode sanity checks... */

    /* the function we are running on must not be marked hidden */
    if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
        BLOGE(sc, "Enumerated function %d is marked as hidden\n",
              SC_PORT(sc));
        return (1);
    }

    /* more than one vnic per port only makes sense in multi-vnic mode */
    if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
        BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n",
              mf_info->vnics_per_port, mf_info->multi_vnics_mode);
        return (1);
    }

    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        /* vnic id > 0 must have valid ovlan in switch-dependent mode */
        if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
            BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n",
                  SC_VN(sc), OVLAN(sc));
            return (1);
        }

        /* multi-vnic mode requires a valid ovlan on this function */
        if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
            BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n",
                  mf_info->multi_vnics_mode, OVLAN(sc));
            return (1);
        }

        /*
         * Verify all functions are either MF or SF mode. If MF, make sure
         * sure that all non-hidden functions have a valid ovlan. If SF,
         * make sure that all non-hidden functions have an invalid ovlan.
         */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
                 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
                BLOGE(sc, "mf_mode=SD function %d MF config "
                          "mismatch, multi_vnics_mode=%d ovlan=%d\n",
                      i, mf_info->multi_vnics_mode, ovlan1);
                return (1);
            }
        }

        /* Verify all funcs on the same port each have a different ovlan. */
        FOREACH_ABS_FUNC_IN_PORT(sc, i) {
            mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config);
            ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag);
            /* iterate from the next function on the port to the max func */
            for (j = i + 2; j < MAX_FUNC_NUM; j += 2) {
                mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config);
                ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag);
                /* only compare visible functions with valid ovlans */
                if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) &&
                    VALID_OVLAN(ovlan1) &&
                    !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) &&
                    VALID_OVLAN(ovlan2) &&
                    (ovlan1 == ovlan2)) {
                    BLOGE(sc, "mf_mode=SD functions %d and %d "
                              "have the same ovlan (%d)\n",
                          i, j, ovlan1);
                    return (1);
                }
            }
        }
    } /* MULTI_FUNCTION_SD */

    return (0);
}
| 13786 |
|
| 13787 |
/*
 * Determine the multi-function mode from shmem (switch-dependent,
 * switch-independent, AFEX or single-function), read the per-mode MF
 * configuration, collect the per-vnic min/max bandwidth parameters, and
 * validate the result. Returns 0 on success, 1 on an invalid or unknown
 * configuration.
 */
static int
bxe_get_mf_cfg_info(struct bxe_softc *sc)
{
    struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
    uint32_t val, mac_upper;
    uint8_t i, vnic;

    /* initialize mf_info defaults (single-function until proven otherwise) */
    mf_info->vnics_per_port = 1;
    mf_info->multi_vnics_mode = FALSE;
    mf_info->path_has_ovlan = FALSE;
    mf_info->mf_mode = SINGLE_FUNCTION;

    if (!CHIP_IS_MF_CAP(sc)) {
        return (0);
    }

    if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
        BLOGE(sc, "Invalid mf_cfg_base!\n");
        return (1);
    }

    /* get the MF mode (switch dependent / independent / single-function) */

    val = SHMEM_RD(sc, dev_info.shared_feature_config.config);

    switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)
    {
    case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:

        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

        /* check for legal upper mac bytes */
        if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
            mf_info->mf_mode = MULTI_FUNCTION_SI;
        } else {
            BLOGE(sc, "Invalid config for Switch Independent mode\n");
        }

        break;

    case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
    case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4:

        /* get outer vlan configuration */
        val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag);

        /* a non-default tag means SD multi-function */
        if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) !=
            FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
            mf_info->mf_mode = MULTI_FUNCTION_SD;
        } else {
            BLOGE(sc, "Invalid config for Switch Dependent mode\n");
        }

        break;

    case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:

        /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */
        return (0);

    case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:

        /*
         * Mark MF mode as NIV if MCP version includes NPAR-SD support
         * and the MAC address is valid.
         */
        mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper);

        if ((SHMEM2_HAS(sc, afex_driver_support)) &&
            (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
            mf_info->mf_mode = MULTI_FUNCTION_AFEX;
        } else {
            BLOGE(sc, "Invalid config for AFEX mode\n");
        }

        break;

    default:

        BLOGE(sc, "Unknown MF mode (0x%08x)\n",
              (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));

        return (1);
    }

    /* set path mf_mode (which could be different than function mf_mode) */
    if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
        mf_info->path_has_ovlan = TRUE;
    } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /*
         * Decide on path multi vnics mode. If we're not in MF mode and in
         * 4-port mode, this is good enough to check vnic-0 of the other port
         * on the same path
         */
        if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) {
            uint8_t other_port = !(PORT_ID(sc) & 1);
            uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port));

            val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag);

            mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
        }
    }

    if (mf_info->mf_mode == SINGLE_FUNCTION) {
        /* invalid MF config: only vnic 0 may exist in SF mode */
        if (SC_VN(sc) >= 1) {
            BLOGE(sc, "VNIC ID >= 1 in SF mode\n");
            return (1);
        }

        return (0);
    }

    /* get the MF configuration */
    mf_info->mf_config[SC_VN(sc)] =
        MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config);

    /* dispatch to the mode-specific shmem reader */
    switch(mf_info->mf_mode)
    {
    case MULTI_FUNCTION_SD:

        bxe_get_shmem_mf_cfg_info_sd(sc);
        break;

    case MULTI_FUNCTION_SI:

        bxe_get_shmem_mf_cfg_info_si(sc);
        break;

    case MULTI_FUNCTION_AFEX:

        bxe_get_shmem_mf_cfg_info_niv(sc);
        break;

    default:

        BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n",
              mf_info->mf_mode);
        return (1);
    }

    /* get the congestion management parameters */

    vnic = 0;
    FOREACH_ABS_FUNC_IN_PORT(sc, i) {
        /* get min/max bw */
        val = MFCFG_RD(sc, func_mf_config[i].config);
        mf_info->min_bw[vnic] =
            ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
        mf_info->max_bw[vnic] =
            ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
        vnic++;
    }

    return (bxe_check_valid_mf_cfg(sc));
}
| 13945 |
|
| 13946 |
static int |
| 13947 |
bxe_get_shmem_info(struct bxe_softc *sc) |
| 13948 |
{ |
| 13949 |
int port; |
| 13950 |
uint32_t mac_hi, mac_lo, val; |
| 13951 |
|
| 13952 |
port = SC_PORT(sc); |
| 13953 |
mac_hi = mac_lo = 0; |
| 13954 |
|
| 13955 |
sc->link_params.sc = sc; |
| 13956 |
sc->link_params.port = port; |
| 13957 |
|
| 13958 |
/* get the hardware config info */ |
| 13959 |
sc->devinfo.hw_config = |
| 13960 |
SHMEM_RD(sc, dev_info.shared_hw_config.config); |
| 13961 |
sc->devinfo.hw_config2 = |
| 13962 |
SHMEM_RD(sc, dev_info.shared_hw_config.config2); |
| 13963 |
|
| 13964 |
sc->link_params.hw_led_mode = |
| 13965 |
((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> |
| 13966 |
SHARED_HW_CFG_LED_MODE_SHIFT); |
| 13967 |
|
| 13968 |
/* get the port feature config */ |
| 13969 |
sc->port.config = |
| 13970 |
SHMEM_RD(sc, dev_info.port_feature_config[port].config), |
| 13971 |
|
| 13972 |
/* get the link params */ |
| 13973 |
sc->link_params.speed_cap_mask[0] = |
| 13974 |
SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); |
| 13975 |
sc->link_params.speed_cap_mask[1] = |
| 13976 |
SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); |
| 13977 |
|
| 13978 |
/* get the lane config */ |
| 13979 |
sc->link_params.lane_config = |
| 13980 |
SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); |
| 13981 |
|
| 13982 |
/* get the link config */ |
| 13983 |
val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); |
| 13984 |
sc->port.link_config[ELINK_INT_PHY] = val; |
| 13985 |
sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); |
| 13986 |
sc->port.link_config[ELINK_EXT_PHY1] = |
| 13987 |
SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); |
| 13988 |
|
| 13989 |
/* get the override preemphasis flag and enable it or turn it off */ |
| 13990 |
val = SHMEM_RD(sc, dev_info.shared_feature_config.config); |
| 13991 |
if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { |
| 13992 |
sc->link_params.feature_config_flags |= |
| 13993 |
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; |
| 13994 |
} else { |
| 13995 |
sc->link_params.feature_config_flags &= |
| 13996 |
~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; |
| 13997 |
} |
| 13998 |
|
| 13999 |
/* get the initial value of the link params */ |
| 14000 |
sc->link_params.multi_phy_config = |
| 14001 |
SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); |
| 14002 |
|
| 14003 |
/* get external phy info */ |
| 14004 |
sc->port.ext_phy_config = |
| 14005 |
SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); |
| 14006 |
|
| 14007 |
/* get the multifunction configuration */ |
| 14008 |
bxe_get_mf_cfg_info(sc); |
| 14009 |
|
| 14010 |
/* get the mac address */ |
| 14011 |
if (IS_MF(sc)) { |
| 14012 |
mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); |
| 14013 |
mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); |
| 14014 |
} else { |
| 14015 |
mac_hi = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); |
| 14016 |
mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); |
| 14017 |
} |
| 14018 |
|
| 14019 |
if ((mac_lo == 0) && (mac_hi == 0)) { |
| 14020 |
*sc->mac_addr_str = 0; |
| 14021 |
BLOGE(sc, "No Ethernet address programmed!\n"); |
| 14022 |
} else { |
| 14023 |
sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); |
| 14024 |
sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); |
| 14025 |
sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); |
| 14026 |
sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); |
| 14027 |
sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); |
| 14028 |
sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); |
| 14029 |
snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), |
| 14030 |
"%02x:%02x:%02x:%02x:%02x:%02x", |
| 14031 |
sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], |
| 14032 |
sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], |
| 14033 |
sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); |
| 14034 |
BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); |
| 14035 |
} |
| 14036 |
|
| 14037 |
#if 0 |
| 14038 |
if (!IS_MF(sc) && |
| 14039 |
((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == |
| 14040 |
PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { |
| 14041 |
sc->flags |= BXE_NO_ISCSI; |
| 14042 |
} |
| 14043 |
if (!IS_MF(sc) && |
| 14044 |
((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == |
| 14045 |
PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { |
| 14046 |
sc->flags |= BXE_NO_FCOE_FLAG; |
| 14047 |
} |
| 14048 |
#endif |
| 14049 |
|
| 14050 |
return (0); |
| 14051 |
} |
| 14052 |
|
| 14053 |
/*
 * Validate the module-level tunables (loader.conf knobs), clamping any
 * out-of-range value to a sane default with a warning, then copy the
 * sanitized values into the softc and derive the queue count from the
 * interrupt mode and CPU count.
 */
static void
bxe_get_tunable_params(struct bxe_softc *sc)
{
    /* sanity checks */

    if ((bxe_interrupt_mode != INTR_MODE_INTX) &&
        (bxe_interrupt_mode != INTR_MODE_MSI) &&
        (bxe_interrupt_mode != INTR_MODE_MSIX)) {
        BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode);
        bxe_interrupt_mode = INTR_MODE_MSIX;
    }

    /* queue_count 0 means "auto" (one queue per CPU, see below) */
    if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) {
        BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count);
        bxe_queue_count = 0;
    }

    if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) {
        if (bxe_max_rx_bufs == 0) {
            /* 0 means "use the whole RX BD ring" */
            bxe_max_rx_bufs = RX_BD_USABLE;
        } else {
            BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs);
            bxe_max_rx_bufs = 2048;
        }
    }

    /* host coalescing tick values are bounded to 1..100 */
    if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) {
        BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks);
        bxe_hc_rx_ticks = 25;
    }

    if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) {
        BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks);
        bxe_hc_tx_ticks = 50;
    }

    if (bxe_max_aggregation_size == 0) {
        bxe_max_aggregation_size = TPA_AGG_SIZE;
    }

    /* aggregation size is carried in a 16-bit field */
    if (bxe_max_aggregation_size > 0xffff) {
        BLOGW(sc, "invalid max_aggregation_size (%d)\n",
              bxe_max_aggregation_size);
        bxe_max_aggregation_size = TPA_AGG_SIZE;
    }

    /* mrrs -1 means "leave the PCI max read request size alone" */
    if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
        BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs);
        bxe_mrrs = -1;
    }

    if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) {
        BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen);
        bxe_autogreeen = 0;
    }

    if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) {
        BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss);
        bxe_udp_rss = 0;
    }

    /* pull in user settings */

    sc->interrupt_mode       = bxe_interrupt_mode;
    sc->max_rx_bufs          = bxe_max_rx_bufs;
    sc->hc_rx_ticks          = bxe_hc_rx_ticks;
    sc->hc_tx_ticks          = bxe_hc_tx_ticks;
    sc->max_aggregation_size = bxe_max_aggregation_size;
    sc->mrrs                 = bxe_mrrs;
    sc->autogreeen           = bxe_autogreeen;
    sc->udp_rss              = bxe_udp_rss;

    /* INTx supports a single queue; MSI/MSI-X scale up to CPU count */
    if (bxe_interrupt_mode == INTR_MODE_INTX) {
        sc->num_queues = 1;
    } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */
        sc->num_queues =
            min((bxe_queue_count ? bxe_queue_count : mp_ncpus),
                MAX_RSS_CHAINS);
        if (sc->num_queues > mp_ncpus) {
            sc->num_queues = mp_ncpus;
        }
    }

    BLOGD(sc, DBG_LOAD,
          "User Config: "
          "debug=0x%lx "
          "interrupt_mode=%d "
          "queue_count=%d "
          "hc_rx_ticks=%d "
          "hc_tx_ticks=%d "
          "rx_budget=%d "
          "max_aggregation_size=%d "
          "mrrs=%d "
          "autogreeen=%d "
          "udp_rss=%d\n",
          bxe_debug,
          sc->interrupt_mode,
          sc->num_queues,
          sc->hc_rx_ticks,
          sc->hc_tx_ticks,
          bxe_rx_budget,
          sc->max_aggregation_size,
          sc->mrrs,
          sc->autogreeen,
          sc->udp_rss);
}
| 14159 |
|
| 14160 |
/*
 * Map the current PHY's media type (from the elink layer) to the
 * corresponding ifmedia IFM_* constant in sc->media, logging what was
 * found. sc->media is set to 0 when no media is present or the type
 * is unrecognized.
 */
static void
bxe_media_detect(struct bxe_softc *sc)
{
    uint32_t phy_idx = bxe_get_cur_phy_idx(sc);
    switch (sc->link_params.phy[phy_idx].media_type) {
    case ELINK_ETH_PHY_SFPP_10G_FIBER:
    case ELINK_ETH_PHY_XFP_FIBER:
        BLOGI(sc, "Found 10Gb Fiber media.\n");
        sc->media = IFM_10G_SR;
        break;
    case ELINK_ETH_PHY_SFP_1G_FIBER:
        BLOGI(sc, "Found 1Gb Fiber media.\n");
        sc->media = IFM_1000_SX;
        break;
    case ELINK_ETH_PHY_KR:
    case ELINK_ETH_PHY_CX4:
        BLOGI(sc, "Found 10GBase-CX4 media.\n");
        sc->media = IFM_10G_CX4;
        break;
    case ELINK_ETH_PHY_DA_TWINAX:
        BLOGI(sc, "Found 10Gb Twinax media.\n");
        sc->media = IFM_10G_TWINAX;
        break;
    case ELINK_ETH_PHY_BASE_T:
        /* BASE-T can be 10G or 1G; decide from the speed capability mask */
        if (sc->link_params.speed_cap_mask[0] &
            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
            BLOGI(sc, "Found 10GBase-T media.\n");
            sc->media = IFM_10G_T;
        } else {
            BLOGI(sc, "Found 1000Base-T media.\n");
            sc->media = IFM_1000_T;
        }
        break;
    case ELINK_ETH_PHY_NOT_PRESENT:
        BLOGI(sc, "Media not present.\n");
        sc->media = 0;
        break;
    case ELINK_ETH_PHY_UNSPECIFIED:
    default:
        BLOGI(sc, "Unknown media!\n");
        sc->media = 0;
        break;
    }
}
| 14204 |
|
| 14205 |
/* extract a bit field from 'value' using the <fname>_MASK/_SHIFT pair */
#define GET_FIELD(value, fname)                     \
    (((value) & (fname##_MASK)) >> (fname##_SHIFT))
/* function id and vector number encoded in an IGU CAM mapping entry */
#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
| 14209 |
|
| 14210 |
/*
 * Discover this PF's IGU status block layout. In backward-compatible
 * interrupt mode the layout is computed directly; otherwise the IGU
 * mapping CAM is scanned for entries belonging to this PF to find the
 * default status block id, the first fastpath status block id, and the
 * usable status block count. Returns 0 on success, -1 if no CAM entries
 * are found for this PF.
 */
static int
bxe_get_igu_cam_info(struct bxe_softc *sc)
{
    int pfid = SC_FUNC(sc);
    int igu_sb_id;
    uint32_t val;
    uint8_t fid, igu_sb_cnt = 0;

    /* 0xff = "no fastpath status block found yet" */
    sc->igu_base_sb = 0xff;

    if (CHIP_INT_MODE_IS_BC(sc)) {
        /* backward-compatible mode: layout is fixed, no CAM scan needed */
        int vn = SC_VN(sc);
        igu_sb_cnt = sc->igu_sb_cnt;
        sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
                           FP_SB_MAX_E1x);
        sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
                          (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn));
        return (0);
    }

    /* IGU in normal mode - read CAM */
    for (igu_sb_id = 0;
         igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
         igu_sb_id++) {
        val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
        if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) {
            continue;
        }
        fid = IGU_FID(val);
        /* only consider PF entries that belong to this PF */
        if ((fid & IGU_FID_ENCODE_IS_PF)) {
            if ((fid & IGU_FID_PF_NUM_MASK) != pfid) {
                continue;
            }
            if (IGU_VEC(val) == 0) {
                /* default status block */
                sc->igu_dsb_id = igu_sb_id;
            } else {
                /* first non-default vector marks the fastpath base */
                if (sc->igu_base_sb == 0xff) {
                    sc->igu_base_sb = igu_sb_id;
                }
                igu_sb_cnt++;
            }
        }
    }

    /*
     * Due to new PF resource allocation by MFW T7.4 and above, it's optional
     * that number of CAM entries will not be equal to the value advertised in
     * PCI. Driver should use the minimal value of both as the actual status
     * block count
     */
    sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);

    if (igu_sb_cnt == 0) {
        BLOGE(sc, "CAM configuration error\n");
        return (-1);
    }

    return (0);
}
| 14270 |
|
| 14271 |
/* |
| 14272 |
* Gather various information from the device config space, the device itself, |
| 14273 |
* shmem, and the user input. |
| 14274 |
*/ |
| 14275 |
static int |
| 14276 |
bxe_get_device_info(struct bxe_softc *sc) |
| 14277 |
{ |
| 14278 |
uint32_t val; |
| 14279 |
int rc; |
| 14280 |
|
| 14281 |
/* Get the data for the device */ |
| 14282 |
sc->devinfo.vendor_id = pci_get_vendor(sc->dev); |
| 14283 |
sc->devinfo.device_id = pci_get_device(sc->dev); |
| 14284 |
sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); |
| 14285 |
sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); |
| 14286 |
|
| 14287 |
/* get the chip revision (chip metal comes from pci config space) */ |
| 14288 |
sc->devinfo.chip_id = |
| 14289 |
sc->link_params.chip_id = |
| 14290 |
(((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | |
| 14291 |
((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | |
| 14292 |
(((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | |
| 14293 |
((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); |
| 14294 |
|
| 14295 |
/* force 57811 according to MISC register */ |
| 14296 |
if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { |
| 14297 |
if (CHIP_IS_57810(sc)) { |
| 14298 |
sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | |
| 14299 |
(sc->devinfo.chip_id & 0x0000ffff)); |
| 14300 |
} else if (CHIP_IS_57810_MF(sc)) { |
| 14301 |
sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | |
| 14302 |
(sc->devinfo.chip_id & 0x0000ffff)); |
| 14303 |
} |
| 14304 |
sc->devinfo.chip_id |= 0x1; |
| 14305 |
} |
| 14306 |
|
| 14307 |
BLOGD(sc, DBG_LOAD, |
| 14308 |
"chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", |
| 14309 |
sc->devinfo.chip_id, |
| 14310 |
((sc->devinfo.chip_id >> 16) & 0xffff), |
| 14311 |
((sc->devinfo.chip_id >> 12) & 0xf), |
| 14312 |
((sc->devinfo.chip_id >> 4) & 0xff), |
| 14313 |
((sc->devinfo.chip_id >> 0) & 0xf)); |
| 14314 |
|
| 14315 |
val = (REG_RD(sc, 0x2874) & 0x55); |
| 14316 |
if ((sc->devinfo.chip_id & 0x1) || |
| 14317 |
(CHIP_IS_E1(sc) && val) || |
| 14318 |
(CHIP_IS_E1H(sc) && (val == 0x55))) { |
| 14319 |
sc->flags |= BXE_ONE_PORT_FLAG; |
| 14320 |
BLOGD(sc, DBG_LOAD, "single port device\n"); |
| 14321 |
} |
| 14322 |
|
| 14323 |
/* set the doorbell size */ |
| 14324 |
sc->doorbell_size = (1 << BXE_DB_SHIFT); |
| 14325 |
|
| 14326 |
/* determine whether the device is in 2 port or 4 port mode */ |
| 14327 |
sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ |
| 14328 |
if (CHIP_IS_E2E3(sc)) { |
| 14329 |
/* |
| 14330 |
* Read port4mode_en_ovwr[0]: |
| 14331 |
* If 1, four port mode is in port4mode_en_ovwr[1]. |
| 14332 |
* If 0, four port mode is in port4mode_en[0]. |
| 14333 |
*/ |
| 14334 |
val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); |
| 14335 |
if (val & 1) { |
| 14336 |
val = ((val >> 1) & 1); |
| 14337 |
} else { |
| 14338 |
val = REG_RD(sc, MISC_REG_PORT4MODE_EN); |
| 14339 |
} |
| 14340 |
|
| 14341 |
sc->devinfo.chip_port_mode = |
| 14342 |
(val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; |
| 14343 |
|
| 14344 |
BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); |
| 14345 |
} |
| 14346 |
|
| 14347 |
/* get the function and path info for the device */ |
| 14348 |
bxe_get_function_num(sc); |
| 14349 |
|
| 14350 |
/* get the shared memory base address */ |
| 14351 |
sc->devinfo.shmem_base = |
| 14352 |
sc->link_params.shmem_base = |
| 14353 |
REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); |
| 14354 |
sc->devinfo.shmem2_base = |
| 14355 |
REG_RD(sc, (SC_PATH(sc) ? MISC_REG_GENERIC_CR_1 : |
| 14356 |
MISC_REG_GENERIC_CR_0)); |
| 14357 |
|
| 14358 |
BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", |
| 14359 |
sc->devinfo.shmem_base, sc->devinfo.shmem2_base); |
| 14360 |
|
| 14361 |
if (!sc->devinfo.shmem_base) { |
| 14362 |
/* this should ONLY prevent upcoming shmem reads */ |
| 14363 |
BLOGI(sc, "MCP not active\n"); |
| 14364 |
sc->flags |= BXE_NO_MCP_FLAG; |
| 14365 |
return (0); |
| 14366 |
} |
| 14367 |
|
| 14368 |
/* make sure the shared memory contents are valid */ |
| 14369 |
val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); |
| 14370 |
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != |
| 14371 |
(SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { |
| 14372 |
BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); |
| 14373 |
return (0); |
| 14374 |
} |
| 14375 |
BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); |
| 14376 |
|
| 14377 |
/* get the bootcode version */ |
| 14378 |
sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); |
| 14379 |
snprintf(sc->devinfo.bc_ver_str, |
| 14380 |
sizeof(sc->devinfo.bc_ver_str), |
| 14381 |
"%d.%d.%d", |
| 14382 |
((sc->devinfo.bc_ver >> 24) & 0xff), |
| 14383 |
((sc->devinfo.bc_ver >> 16) & 0xff), |
| 14384 |
((sc->devinfo.bc_ver >> 8) & 0xff)); |
| 14385 |
BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); |
| 14386 |
|
| 14387 |
/* get the bootcode shmem address */ |
| 14388 |
sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); |
| 14389 |
BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x08%x \n", sc->devinfo.mf_cfg_base); |
| 14390 |
|
| 14391 |
/* clean indirect addresses as they're not used */ |
| 14392 |
pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); |
| 14393 |
if (IS_PF(sc)) { |
| 14394 |
REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); |
| 14395 |
REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); |
| 14396 |
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); |
| 14397 |
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); |
| 14398 |
if (CHIP_IS_E1x(sc)) { |
| 14399 |
REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); |
| 14400 |
REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); |
| 14401 |
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); |
| 14402 |
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); |
| 14403 |
} |
| 14404 |
|
| 14405 |
/* |
| 14406 |
* Enable internal target-read (in case we are probed after PF |
| 14407 |
* FLR). Must be done prior to any BAR read access. Only for |
| 14408 |
* 57712 and up |
| 14409 |
*/ |
| 14410 |
if (!CHIP_IS_E1x(sc)) { |
| 14411 |
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); |
| 14412 |
} |
| 14413 |
} |
| 14414 |
|
| 14415 |
/* get the nvram size */ |
| 14416 |
val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); |
| 14417 |
sc->devinfo.flash_size = |
| 14418 |
(NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); |
| 14419 |
BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); |
| 14420 |
|
| 14421 |
/* get PCI capabilites */ |
| 14422 |
bxe_probe_pci_caps(sc); |
| 14423 |
|
| 14424 |
bxe_set_power_state(sc, PCI_PM_D0); |
| 14425 |
|
| 14426 |
/* get various configuration parameters from shmem */ |
| 14427 |
bxe_get_shmem_info(sc); |
| 14428 |
|
| 14429 |
if (sc->devinfo.pcie_msix_cap_reg != 0) { |
| 14430 |
val = pci_read_config(sc->dev, |
| 14431 |
(sc->devinfo.pcie_msix_cap_reg + |
| 14432 |
PCIR_MSIX_CTRL), |
| 14433 |
2); |
| 14434 |
sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); |
| 14435 |
} else { |
| 14436 |
sc->igu_sb_cnt = 1; |
| 14437 |
} |
| 14438 |
|
| 14439 |
sc->igu_base_addr = BAR_IGU_INTMEM; |
| 14440 |
|
| 14441 |
/* initialize IGU parameters */ |
| 14442 |
if (CHIP_IS_E1x(sc)) { |
| 14443 |
sc->devinfo.int_block = INT_BLOCK_HC; |
| 14444 |
sc->igu_dsb_id = DEF_SB_IGU_ID; |
| 14445 |
sc->igu_base_sb = 0; |
| 14446 |
} else { |
| 14447 |
sc->devinfo.int_block = INT_BLOCK_IGU; |
| 14448 |
|
| 14449 |
/* do not allow device reset during IGU info preocessing */ |
| 14450 |
bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); |
| 14451 |
|
| 14452 |
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); |
| 14453 |
|
| 14454 |
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
| 14455 |
int tout = 5000; |
| 14456 |
|
| 14457 |
BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); |
| 14458 |
|
| 14459 |
val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); |
| 14460 |
REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); |
| 14461 |
REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); |
| 14462 |
|
| 14463 |
while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { |
| 14464 |
tout--; |
| 14465 |
DELAY(1000); |
| 14466 |
} |
| 14467 |
|
| 14468 |
if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { |
| 14469 |
BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); |
| 14470 |
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); |
| 14471 |
return (-1); |
| 14472 |
} |
| 14473 |
} |
| 14474 |
|
| 14475 |
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { |
| 14476 |
BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); |
| 14477 |
sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; |
| 14478 |
} else { |
| 14479 |
BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); |
| 14480 |
} |
| 14481 |
|
| 14482 |
rc = bxe_get_igu_cam_info(sc); |
| 14483 |
|
| 14484 |
bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); |
| 14485 |
|
| 14486 |
if (rc) { |
| 14487 |
return (rc); |
| 14488 |
} |
| 14489 |
} |
| 14490 |
|
| 14491 |
/* |
| 14492 |
* Get base FW non-default (fast path) status block ID. This value is |
| 14493 |
* used to initialize the fw_sb_id saved on the fp/queue structure to |
| 14494 |
* determine the id used by the FW. |
| 14495 |
*/ |
| 14496 |
if (CHIP_IS_E1x(sc)) { |
| 14497 |
sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); |
| 14498 |
} else { |
| 14499 |
/* |
| 14500 |
* 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of |
| 14501 |
* the same queue are indicated on the same IGU SB). So we prefer |
| 14502 |
* FW and IGU SBs to be the same value. |
| 14503 |
*/ |
| 14504 |
sc->base_fw_ndsb = sc->igu_base_sb; |
| 14505 |
} |
| 14506 |
|
| 14507 |
BLOGD(sc, DBG_LOAD, |
| 14508 |
"igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", |
| 14509 |
sc->igu_dsb_id, sc->igu_base_sb, |
| 14510 |
sc->igu_sb_cnt, sc->base_fw_ndsb); |
| 14511 |
|
| 14512 |
elink_phy_probe(&sc->link_params); |
| 14513 |
|
| 14514 |
return (0); |
| 14515 |
} |
| 14516 |
|
| 14517 |
/*
 * Build the mask of link modes supported by this port's PHY(s).
 *
 * Populates sc->port.supported[0..1] from the elink phy tables (honoring the
 * PHY-swap NVRAM setting when two external phys are present), resolves the
 * PHY MDIO address for the given switch configuration, and then clears any
 * modes that are not enabled in the NVRAM speed_cap_mask for each config.
 * Logs and returns early on an invalid NVRAM phy/switch configuration.
 */
static void
bxe_link_settings_supported(struct bxe_softc *sc,
                            uint32_t         switch_cfg)
{
    uint32_t cfg_size = 0;   /* number of valid entries in supported[] */
    uint32_t idx;
    uint8_t port = SC_PORT(sc);

    /* aggregation of supported attributes of all external phys */
    sc->port.supported[0] = 0;
    sc->port.supported[1] = 0;

    switch (sc->link_params.num_phys) {
    case 1:
        /* internal phy only */
        sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
        cfg_size = 1;
        break;
    case 2:
        /* single external phy */
        sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
        cfg_size = 1;
        break;
    case 3:
        /* two external phys; the swap setting decides which phy maps to
         * config index 0 vs index 1 */
        if (sc->link_params.multi_phy_config &
            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
            sc->port.supported[1] =
                sc->link_params.phy[ELINK_EXT_PHY1].supported;
            sc->port.supported[0] =
                sc->link_params.phy[ELINK_EXT_PHY2].supported;
        } else {
            sc->port.supported[0] =
                sc->link_params.phy[ELINK_EXT_PHY1].supported;
            sc->port.supported[1] =
                sc->link_params.phy[ELINK_EXT_PHY2].supported;
        }
        cfg_size = 2;
        break;
    }

    if (!(sc->port.supported[0] || sc->port.supported[1])) {
        BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n",
              SHMEM_RD(sc,
                       dev_info.port_hw_config[port].external_phy_config),
              SHMEM_RD(sc,
                       dev_info.port_hw_config[port].external_phy_config2));
        return;
    }

    /* resolve the PHY MDIO address: E3 uses the WC registers, earlier
     * chips use per-port NIG registers keyed by switch config */
    if (CHIP_IS_E3(sc))
        sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
    else {
        switch (switch_cfg) {
        case ELINK_SWITCH_CFG_1G:
            sc->port.phy_addr =
                REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
            break;
        case ELINK_SWITCH_CFG_10G:
            sc->port.phy_addr =
                REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
            break;
        default:
            BLOGE(sc, "Invalid switch config in link_config=0x%08x\n",
                  sc->port.link_config[0]);
            return;
        }
    }

    BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);

    /* mask what we support according to speed_cap_mask per configuration */
    for (idx = 0; idx < cfg_size; idx++) {
        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
        }

        if (!(sc->link_params.speed_cap_mask[idx] &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) {
            sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
        }
    }

    BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n",
          sc->port.supported[0], sc->port.supported[1]);
}
| 14631 |
|
| 14632 |
/*
 * Translate the NVRAM link_config word(s) into the requested link settings.
 *
 * For each phy configuration index this fills in req_line_speed, req_duplex,
 * req_flow_ctrl (in sc->link_params) and the advertising mask (in sc->port),
 * cross-checking every requested speed against the supported[] mask built by
 * bxe_link_settings_supported(). On an inconsistent NVRAM entry it logs the
 * error and returns without processing further configs.
 */
static void
bxe_link_settings_requested(struct bxe_softc *sc)
{
    uint32_t link_config;
    uint32_t idx;
    uint32_t cfg_size = 0;   /* number of phy configs to process */

    sc->port.advertising[0] = 0;
    sc->port.advertising[1] = 0;

    switch (sc->link_params.num_phys) {
    case 1:
    case 2:
        cfg_size = 1;
        break;
    case 3:
        cfg_size = 2;
        break;
    }

    for (idx = 0; idx < cfg_size; idx++) {
        /* full duplex unless a half-duplex speed is selected below */
        sc->link_params.req_duplex[idx] = DUPLEX_FULL;
        link_config = sc->port.link_config[idx];

        switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
        case PORT_FEATURE_LINK_SPEED_AUTO:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
                sc->port.advertising[idx] |= sc->port.supported[idx];
                if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
                    sc->port.advertising[idx] |=
                        (ELINK_SUPPORTED_100baseT_Half |
                         ELINK_SUPPORTED_100baseT_Full);
            } else {
                /* force 10G, no AN */
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
                sc->port.advertising[idx] |=
                    (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
                continue;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_10M_FULL:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
                                              ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_10M_HALF:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
                sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
                                              ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_100M_FULL:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
                                              ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_100M_HALF:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
                sc->link_params.req_duplex[idx] = DUPLEX_HALF;
                sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
                                              ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_1G:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
                sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
                                              ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_2_5G:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
                sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
                                              ADVERTISED_TP);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_10G_CX4:
            if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
                sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
                sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
                                              ADVERTISED_FIBRE);
            } else {
                BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                          "speed_cap_mask=0x%08x\n",
                      link_config, sc->link_params.speed_cap_mask[idx]);
                return;
            }
            break;

        case PORT_FEATURE_LINK_SPEED_20G:
            /* no supported-mask check for 20G (matches other drivers) */
            sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
            break;

        default:
            /* unknown speed selection: log, then fall back to autoneg */
            BLOGE(sc, "Invalid NVRAM config link_config=0x%08x "
                      "speed_cap_mask=0x%08x\n",
                  link_config, sc->link_params.speed_cap_mask[idx]);
            sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
            sc->port.advertising[idx] = sc->port.supported[idx];
            break;
        }

        /* flow control request comes straight from the NVRAM bits */
        sc->link_params.req_flow_ctrl[idx] =
            (link_config & PORT_FEATURE_FLOW_CONTROL_MASK);

        if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
            /* auto flow control requires autoneg support */
            if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
                sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
            } else {
                bxe_set_requested_fc(sc);
            }
        }

        BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d "
                            "req_flow_ctrl=0x%x advertising=0x%x\n",
              sc->link_params.req_line_speed[idx],
              sc->link_params.req_duplex[idx],
              sc->link_params.req_flow_ctrl[idx],
              sc->port.advertising[idx]);
    }
}
| 14800 |
|
| 14801 |
/*
 * Derive the PHY/link configuration for this port.
 *
 * Computes the supported and requested link settings, applies the AutoGrEEEn
 * policy (user override first, then the NVRAM port feature bit), programs the
 * EEE mode from the NVRAM eee_power_mode field, and finally detects the media
 * type. Relies on shmem data already loaded by bxe_get_shmem_info().
 */
static void
bxe_get_phy_info(struct bxe_softc *sc)
{
    uint8_t port = SC_PORT(sc);
    uint32_t config = sc->port.config;
    uint32_t eee_mode;

    /* shmem data already read in bxe_get_shmem_info() */

    BLOGD(sc, DBG_LOAD, "lane_config=0x%08x speed_cap_mask0=0x%08x "
                        "link_config0=0x%08x\n",
          sc->link_params.lane_config,
          sc->link_params.speed_cap_mask[0],
          sc->port.link_config[0]);

    bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
    bxe_link_settings_requested(sc);

    /*
     * AutoGrEEEn policy: an explicit user force-on/off wins, otherwise the
     * NVRAM port feature bit decides. (The triple-E spelling "autogreeen" /
     * "AUTOGREEEN" is the actual field/flag naming, not a typo.)
     */
    if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
        sc->link_params.feature_config_flags &=
            ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) {
        sc->link_params.feature_config_flags |=
            ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED;
    }

    /* configure link feature according to nvram value */
    eee_mode =
        (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) &
          PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
         PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
    if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
        sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
                                    ELINK_EEE_MODE_ENABLE_LPI |
                                    ELINK_EEE_MODE_OUTPUT_TIME);
    } else {
        sc->link_params.eee_mode = 0;
    }

    /* get the media type */
    bxe_media_detect(sc);
}
| 14846 |
|
| 14847 |
static void |
| 14848 |
bxe_get_params(struct bxe_softc *sc) |
| 14849 |
{ |
| 14850 |
/* get user tunable params */ |
| 14851 |
bxe_get_tunable_params(sc); |
| 14852 |
|
| 14853 |
/* select the RX and TX ring sizes */ |
| 14854 |
sc->tx_ring_size = TX_BD_USABLE; |
| 14855 |
sc->rx_ring_size = RX_BD_USABLE; |
| 14856 |
|
| 14857 |
/* XXX disable WoL */ |
| 14858 |
sc->wol = 0; |
| 14859 |
} |
| 14860 |
|
| 14861 |
static void |
| 14862 |
bxe_set_modes_bitmap(struct bxe_softc *sc) |
| 14863 |
{ |
| 14864 |
uint32_t flags = 0; |
| 14865 |
|
| 14866 |
if (CHIP_REV_IS_FPGA(sc)) { |
| 14867 |
SET_FLAGS(flags, MODE_FPGA); |
| 14868 |
} else if (CHIP_REV_IS_EMUL(sc)) { |
| 14869 |
SET_FLAGS(flags, MODE_EMUL); |
| 14870 |
} else { |
| 14871 |
SET_FLAGS(flags, MODE_ASIC); |
| 14872 |
} |
| 14873 |
|
| 14874 |
if (CHIP_IS_MODE_4_PORT(sc)) { |
| 14875 |
SET_FLAGS(flags, MODE_PORT4); |
| 14876 |
} else { |
| 14877 |
SET_FLAGS(flags, MODE_PORT2); |
| 14878 |
} |
| 14879 |
|
| 14880 |
if (CHIP_IS_E2(sc)) { |
| 14881 |
SET_FLAGS(flags, MODE_E2); |
| 14882 |
} else if (CHIP_IS_E3(sc)) { |
| 14883 |
SET_FLAGS(flags, MODE_E3); |
| 14884 |
if (CHIP_REV(sc) == CHIP_REV_Ax) { |
| 14885 |
SET_FLAGS(flags, MODE_E3_A0); |
| 14886 |
} else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { |
| 14887 |
SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); |
| 14888 |
} |
| 14889 |
} |
| 14890 |
|
| 14891 |
if (IS_MF(sc)) { |
| 14892 |
SET_FLAGS(flags, MODE_MF); |
| 14893 |
switch (sc->devinfo.mf_info.mf_mode) { |
| 14894 |
case MULTI_FUNCTION_SD: |
| 14895 |
SET_FLAGS(flags, MODE_MF_SD); |
| 14896 |
break; |
| 14897 |
case MULTI_FUNCTION_SI: |
| 14898 |
SET_FLAGS(flags, MODE_MF_SI); |
| 14899 |
break; |
| 14900 |
case MULTI_FUNCTION_AFEX: |
| 14901 |
SET_FLAGS(flags, MODE_MF_AFEX); |
| 14902 |
break; |
| 14903 |
} |
| 14904 |
} else { |
| 14905 |
SET_FLAGS(flags, MODE_SF); |
| 14906 |
} |
| 14907 |
|
| 14908 |
#if defined(__LITTLE_ENDIAN) |
| 14909 |
SET_FLAGS(flags, MODE_LITTLE_ENDIAN); |
| 14910 |
#else /* __BIG_ENDIAN */ |
| 14911 |
SET_FLAGS(flags, MODE_BIG_ENDIAN); |
| 14912 |
#endif |
| 14913 |
|
| 14914 |
INIT_MODE_FLAGS(sc) = flags; |
| 14915 |
} |
| 14916 |
|
| 14917 |
static int |
| 14918 |
bxe_alloc_hsi_mem(struct bxe_softc *sc) |
| 14919 |
{ |
| 14920 |
struct bxe_fastpath *fp; |
| 14921 |
bus_addr_t busaddr; |
| 14922 |
int max_agg_queues; |
| 14923 |
int max_segments; |
| 14924 |
bus_size_t max_size; |
| 14925 |
bus_size_t max_seg_size; |
| 14926 |
char buf[32]; |
| 14927 |
int rc; |
| 14928 |
int i, j; |
| 14929 |
|
| 14930 |
/* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ |
| 14931 |
|
| 14932 |
/* allocate the parent bus DMA tag */ |
| 14933 |
rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ |
| 14934 |
1, /* alignment */ |
| 14935 |
0, /* boundary limit */ |
| 14936 |
BUS_SPACE_MAXADDR, /* restricted low */ |
| 14937 |
BUS_SPACE_MAXADDR, /* restricted hi */ |
| 14938 |
NULL, /* addr filter() */ |
| 14939 |
NULL, /* addr filter() arg */ |
| 14940 |
BUS_SPACE_MAXSIZE_32BIT, /* max map size */ |
| 14941 |
BUS_SPACE_UNRESTRICTED, /* num discontinuous */ |
| 14942 |
BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ |
| 14943 |
0, /* flags */ |
| 14944 |
NULL, /* lock() */ |
| 14945 |
NULL, /* lock() arg */ |
| 14946 |
&sc->parent_dma_tag); /* returned dma tag */ |
| 14947 |
if (rc != 0) { |
| 14948 |
BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); |
| 14949 |
return (1); |
| 14950 |
} |
| 14951 |
|
| 14952 |
/************************/ |
| 14953 |
/* DEFAULT STATUS BLOCK */ |
| 14954 |
/************************/ |
| 14955 |
|
| 14956 |
if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), |
| 14957 |
&sc->def_sb_dma, "default status block") != 0) { |
| 14958 |
/* XXX */ |
| 14959 |
bus_dma_tag_destroy(sc->parent_dma_tag); |
| 14960 |
return (1); |
| 14961 |
} |
| 14962 |
|
| 14963 |
sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; |
| 14964 |
|
| 14965 |
/***************/ |
| 14966 |
/* EVENT QUEUE */ |
| 14967 |
/***************/ |
| 14968 |
|
| 14969 |
if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, |
| 14970 |
&sc->eq_dma, "event queue") != 0) { |
| 14971 |
/* XXX */ |
| 14972 |
bxe_dma_free(sc, &sc->def_sb_dma); |
| 14973 |
sc->def_sb = NULL; |
| 14974 |
bus_dma_tag_destroy(sc->parent_dma_tag); |
| 14975 |
return (1); |
| 14976 |
} |
| 14977 |
|
| 14978 |
sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; |
| 14979 |
|
| 14980 |
/*************/ |
| 14981 |
/* SLOW PATH */ |
| 14982 |
/*************/ |
| 14983 |
|
| 14984 |
if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), |
| 14985 |
&sc->sp_dma, "slow path") != 0) { |
| 14986 |
/* XXX */ |
| 14987 |
bxe_dma_free(sc, &sc->eq_dma); |
| 14988 |
sc->eq = NULL; |
| 14989 |
bxe_dma_free(sc, &sc->def_sb_dma); |
| 14990 |
sc->def_sb = NULL; |
| 14991 |
bus_dma_tag_destroy(sc->parent_dma_tag); |
| 14992 |
return (1); |
| 14993 |
} |
| 14994 |
|
| 14995 |
sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; |
| 14996 |
|
| 14997 |
/*******************/ |
| 14998 |
/* SLOW PATH QUEUE */ |
| 14999 |
/*******************/ |
| 15000 |
|
| 15001 |
if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, |
| 15002 |
&sc->spq_dma, "slow path queue") != 0) { |
| 15003 |
/* XXX */ |
| 15004 |
bxe_dma_free(sc, &sc->sp_dma); |
| 15005 |
sc->sp = NULL; |
| 15006 |
bxe_dma_free(sc, &sc->eq_dma); |
| 15007 |
sc->eq = NULL; |
| 15008 |
bxe_dma_free(sc, &sc->def_sb_dma); |
| 15009 |
sc->def_sb = NULL; |
| 15010 |
bus_dma_tag_destroy(sc->parent_dma_tag); |
| 15011 |
return (1); |
| 15012 |
} |
| 15013 |
|
| 15014 |
sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; |
| 15015 |
|
| 15016 |
/***************************/ |
| 15017 |
/* FW DECOMPRESSION BUFFER */ |
| 15018 |
/***************************/ |
| 15019 |
|
| 15020 |
if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, |
| 15021 |
"fw decompression buffer") != 0) { |
| 15022 |
/* XXX */ |
| 15023 |
bxe_dma_free(sc, &sc->spq_dma); |
| 15024 |
sc->spq = NULL; |
| 15025 |
bxe_dma_free(sc, &sc->sp_dma); |
| 15026 |
sc->sp = NULL; |
| 15027 |
bxe_dma_free(sc, &sc->eq_dma); |
| 15028 |
sc->eq = NULL; |
| 15029 |
bxe_dma_free(sc, &sc->def_sb_dma); |
| 15030 |
sc->def_sb = NULL; |
| 15031 |
bus_dma_tag_destroy(sc->parent_dma_tag); |
| 15032 |
return (1); |
| 15033 |
} |
| 15034 |
|
| 15035 |
sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; |
| 15036 |
|
| 15037 |
if ((sc->gz_strm = |
| 15038 |
malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { |
| 15039 |
/* XXX */ |
| 15040 |
bxe_dma_free(sc, &sc->gz_buf_dma); |
| 15041 |
sc->gz_buf = NULL; |
| 15042 |
bxe_dma_free(sc, &sc->spq_dma); |
| 15043 |
sc->spq = NULL; |
| 15044 |
bxe_dma_free(sc, &sc->sp_dma); |
| 15045 |
sc->sp = NULL; |
| 15046 |
bxe_dma_free(sc, &sc->eq_dma); |
| 15047 |
sc->eq = NULL; |
| 15048 |
bxe_dma_free(sc, &sc->def_sb_dma); |
| 15049 |
sc->def_sb = NULL; |
| 15050 |
bus_dma_tag_destroy(sc->parent_dma_tag); |
| 15051 |
return (1); |
| 15052 |
} |
| 15053 |
|
| 15054 |
/*************/ |
| 15055 |
/* FASTPATHS */ |
| 15056 |
/*************/ |
| 15057 |
|
| 15058 |
/* allocate DMA memory for each fastpath structure */ |
| 15059 |
for (i = 0; i < sc->num_queues; i++) { |
| 15060 |
fp = &sc->fp[i]; |
| 15061 |
fp->sc = sc; |
| 15062 |
fp->index = i; |
| 15063 |
|
| 15064 |
/*******************/ |
| 15065 |
/* FP STATUS BLOCK */ |
| 15066 |
/*******************/ |
| 15067 |
|
| 15068 |
snprintf(buf, sizeof(buf), "fp %d status block", i); |
| 15069 |
if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), |
| 15070 |
&fp->sb_dma, buf) != 0) { |
| 15071 |
/* XXX unwind and free previous fastpath allocations */ |
| 15072 |
BLOGE(sc, "Failed to alloc %s\n", buf); |
| 15073 |
return (1); |
| 15074 |
} else { |
| 15075 |
if (CHIP_IS_E2E3(sc)) { |
| 15076 |
fp->status_block.e2_sb = |
| 15077 |
(struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; |
| 15078 |
} else { |
| 15079 |
fp->status_block.e1x_sb = |
| 15080 |
(struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; |
| 15081 |
} |
| 15082 |
} |
| 15083 |
|
| 15084 |
/******************/ |
| 15085 |
/* FP TX BD CHAIN */ |
| 15086 |
/******************/ |
| 15087 |
|
| 15088 |
snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); |
| 15089 |
if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), |
| 15090 |
&fp->tx_dma, buf) != 0) { |
| 15091 |
/* XXX unwind and free previous fastpath allocations */ |
| 15092 |
BLOGE(sc, "Failed to alloc %s\n", buf); |
| 15093 |
return (1); |
| 15094 |
} else { |
| 15095 |
fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; |
| 15096 |
} |
| 15097 |
|
| 15098 |
/* link together the tx bd chain pages */ |
| 15099 |
for (j = 1; j <= TX_BD_NUM_PAGES; j++) { |
| 15100 |
/* index into the tx bd chain array to last entry per page */ |
| 15101 |
struct eth_tx_next_bd *tx_next_bd = |
| 15102 |
&fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; |
| 15103 |
/* point to the next page and wrap from last page */ |
| 15104 |
busaddr = (fp->tx_dma.paddr + |
| 15105 |
(BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); |
| 15106 |
tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); |
| 15107 |
tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); |
| 15108 |
} |
| 15109 |
|
| 15110 |
/******************/ |
| 15111 |
/* FP RX BD CHAIN */ |
| 15112 |
/******************/ |
| 15113 |
|
| 15114 |
snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); |
| 15115 |
if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), |
| 15116 |
&fp->rx_dma, buf) != 0) { |
| 15117 |
/* XXX unwind and free previous fastpath allocations */ |
| 15118 |
BLOGE(sc, "Failed to alloc %s\n", buf); |
| 15119 |
return (1); |
| 15120 |
} else { |
| 15121 |
fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; |
| 15122 |
} |
| 15123 |
|
| 15124 |
/* link together the rx bd chain pages */ |
| 15125 |
for (j = 1; j <= RX_BD_NUM_PAGES; j++) { |
| 15126 |
/* index into the rx bd chain array to last entry per page */ |
| 15127 |
struct eth_rx_bd *rx_bd = |
| 15128 |
&fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; |
| 15129 |
/* point to the next page and wrap from last page */ |
| 15130 |
busaddr = (fp->rx_dma.paddr + |
| 15131 |
(BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); |
| 15132 |
rx_bd->addr_hi = htole32(U64_HI(busaddr)); |
| 15133 |
rx_bd->addr_lo = htole32(U64_LO(busaddr)); |
| 15134 |
} |
| 15135 |
|
| 15136 |
/*******************/ |
| 15137 |
/* FP RX RCQ CHAIN */ |
| 15138 |
/*******************/ |
| 15139 |
|
| 15140 |
snprintf(buf, sizeof(buf), "fp %d rcq chain", i); |
| 15141 |
if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), |
| 15142 |
&fp->rcq_dma, buf) != 0) { |
| 15143 |
/* XXX unwind and free previous fastpath allocations */ |
| 15144 |
BLOGE(sc, "Failed to alloc %s\n", buf); |
| 15145 |
return (1); |
| 15146 |
} else { |
| 15147 |
fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; |
| 15148 |
} |
| 15149 |
|
| 15150 |
/* link together the rcq chain pages */ |
| 15151 |
for (j = 1; j <= RCQ_NUM_PAGES; j++) { |
| 15152 |
/* index into the rcq chain array to last entry per page */ |
| 15153 |
struct eth_rx_cqe_next_page *rx_cqe_next = |
| 15154 |
(struct eth_rx_cqe_next_page *) |
| 15155 |
&fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; |
| 15156 |
/* point to the next page and wrap from last page */ |
| 15157 |
busaddr = (fp->rcq_dma.paddr + |
| 15158 |
(BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); |
| 15159 |
rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); |
| 15160 |
rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); |
| 15161 |
} |
| 15162 |
|
| 15163 |
/*******************/ |
| 15164 |
/* FP RX SGE CHAIN */ |
| 15165 |
/*******************/ |
| 15166 |
|
| 15167 |
snprintf(buf, sizeof(buf), "fp %d sge chain", i); |
| 15168 |
if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), |
| 15169 |
&fp->rx_sge_dma, buf) != 0) { |
| 15170 |
/* XXX unwind and free previous fastpath allocations */ |
| 15171 |
BLOGE(sc, "Failed to alloc %s\n", buf); |
| 15172 |
return (1); |
| 15173 |
} else { |
| 15174 |
fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; |
| 15175 |
} |
| 15176 |
|
| 15177 |
/* link together the sge chain pages */ |
| 15178 |
for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { |
| 15179 |
/* index into the rcq chain array to last entry per page */ |
| 15180 |
struct eth_rx_sge *rx_sge = |
| 15181 |
&fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; |
| 15182 |
/* point to the next page and wrap from last page */ |
| 15183 |
busaddr = (fp->rx_sge_dma.paddr + |
| 15184 |
(BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); |
| 15185 |
rx_sge->addr_hi = htole32(U64_HI(busaddr)); |
| 15186 |
rx_sge->addr_lo = htole32(U64_LO(busaddr)); |
| 15187 |
} |
| 15188 |
|
| 15189 |
/***********************/ |
| 15190 |
/* FP TX MBUF DMA MAPS */ |
| 15191 |
/***********************/ |
| 15192 |
|
| 15193 |
/* set required sizes before mapping to conserve resources */ |
| 15194 |
if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { |
| 15195 |
max_size = BXE_TSO_MAX_SIZE; |
| 15196 |
max_segments = BXE_TSO_MAX_SEGMENTS; |
| 15197 |
max_seg_size = BXE_TSO_MAX_SEG_SIZE; |
| 15198 |
} else { |
| 15199 |
max_size = (MCLBYTES * BXE_MAX_SEGMENTS); |
| 15200 |
max_segments = BXE_MAX_SEGMENTS; |
| 15201 |
max_seg_size = MCLBYTES; |
| 15202 |
} |
| 15203 |
|
| 15204 |
/* create a dma tag for the tx mbufs */ |
| 15205 |
rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ |
| 15206 |
1, /* alignment */ |
| 15207 |
0, /* boundary limit */ |
| 15208 |
BUS_SPACE_MAXADDR, /* restricted low */ |
| 15209 |
BUS_SPACE_MAXADDR, /* restricted hi */ |
| 15210 |
NULL, /* addr filter() */ |
| 15211 |
NULL, /* addr filter() arg */ |
| 15212 |
max_size, /* max map size */ |
| 15213 |
max_segments, /* num discontinuous */ |
| 15214 |
max_seg_size, /* max seg size */ |
| 15215 |
0, /* flags */ |
| 15216 |
NULL, /* lock() */ |
| 15217 |
NULL, /* lock() arg */ |
| 15218 |
&fp->tx_mbuf_tag); /* returned dma tag */ |
| 15219 |
if (rc != 0) { |
| 15220 |
/* XXX unwind and free previous fastpath allocations */ |
| 15221 |
BLOGE(sc, "Failed to create dma tag for " |
| 15222 |
"'fp %d tx mbufs' (%d)\n", |
| 15223 |
i, rc); |
| 15224 |
return (1); |
| 15225 |
} |
| 15226 |
|
| 15227 |
/* create dma maps for each of the tx mbuf clusters */ |
| 15228 |
for (j = 0; j < TX_BD_TOTAL; j++) { |
| 15229 |
if (bus_dmamap_create(fp->tx_mbuf_tag, |
| 15230 |
BUS_DMA_NOWAIT, |
| 15231 |
&fp->tx_mbuf_chain[j].m_map)) { |
| 15232 |
/* XXX unwind and free previous fastpath allocations */ |
| 15233 |
BLOGE(sc, "Failed to create dma map for " |
| 15234 |
"'fp %d tx mbuf %d' (%d)\n", |
| 15235 |
i, j, rc); |
| 15236 |
return (1); |
| 15237 |
} |
| 15238 |
} |
| 15239 |
|
| 15240 |
/***********************/ |
| 15241 |
/* FP RX MBUF DMA MAPS */ |
| 15242 |
/***********************/ |
| 15243 |
|
| 15244 |
/* create a dma tag for the rx mbufs */ |
| 15245 |
rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ |
| 15246 |
1, /* alignment */ |
| 15247 |
0, /* boundary limit */ |
| 15248 |
BUS_SPACE_MAXADDR, /* restricted low */ |
| 15249 |
BUS_SPACE_MAXADDR, /* restricted hi */ |
| 15250 |
NULL, /* addr filter() */ |
| 15251 |
NULL, /* addr filter() arg */ |
| 15252 |
MJUM9BYTES, /* max map size */ |
| 15253 |
1, /* num discontinuous */ |
| 15254 |
MJUM9BYTES, /* max seg size */ |
| 15255 |
0, /* flags */ |
| 15256 |
NULL, /* lock() */ |
| 15257 |
NULL, /* lock() arg */ |
| 15258 |
&fp->rx_mbuf_tag); /* returned dma tag */ |
| 15259 |
if (rc != 0) { |
| 15260 |
/* XXX unwind and free previous fastpath allocations */ |
| 15261 |
BLOGE(sc, "Failed to create dma tag for " |
| 15262 |
"'fp %d rx mbufs' (%d)\n", |
| 15263 |
i, rc); |
| 15264 |
return (1); |
| 15265 |
} |
| 15266 |
|
| 15267 |
/* create dma maps for each of the rx mbuf clusters */ |
| 15268 |
for (j = 0; j < RX_BD_TOTAL; j++) { |
| 15269 |
if (bus_dmamap_create(fp->rx_mbuf_tag, |
| 15270 |
BUS_DMA_NOWAIT, |
| 15271 |
&fp->rx_mbuf_chain[j].m_map)) { |
| 15272 |
/* XXX unwind and free previous fastpath allocations */ |
| 15273 |
BLOGE(sc, "Failed to create dma map for " |
| 15274 |
"'fp %d rx mbuf %d' (%d)\n", |
| 15275 |
i, j, rc); |
| 15276 |
return (1); |
| 15277 |
} |
| 15278 |
} |
| 15279 |
|
| 15280 |
/* create dma map for the spare rx mbuf cluster */ |
| 15281 |
if (bus_dmamap_create(fp->rx_mbuf_tag, |
| 15282 |
BUS_DMA_NOWAIT, |
| 15283 |
&fp->rx_mbuf_spare_map)) { |
| 15284 |
/* XXX unwind and free previous fastpath allocations */ |
| 15285 |
BLOGE(sc, "Failed to create dma map for " |
| 15286 |
"'fp %d spare rx mbuf' (%d)\n", |
| 15287 |
i, rc); |
| 15288 |
return (1); |
| 15289 |
} |
| 15290 |
|
| 15291 |
/***************************/ |
| 15292 |
/* FP RX SGE MBUF DMA MAPS */ |
| 15293 |
/***************************/ |
| 15294 |
|
| 15295 |
/* create a dma tag for the rx sge mbufs */ |
| 15296 |
rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ |
| 15297 |
1, /* alignment */ |
| 15298 |
0, /* boundary limit */ |
| 15299 |
BUS_SPACE_MAXADDR, /* restricted low */ |
| 15300 |
BUS_SPACE_MAXADDR, /* restricted hi */ |
| 15301 |
NULL, /* addr filter() */ |
| 15302 |
NULL, /* addr filter() arg */ |
| 15303 |
BCM_PAGE_SIZE, /* max map size */ |
| 15304 |
1, /* num discontinuous */ |
| 15305 |
BCM_PAGE_SIZE, /* max seg size */ |
| 15306 |
0, /* flags */ |
| 15307 |
NULL, /* lock() */ |
| 15308 |
NULL, /* lock() arg */ |
| 15309 |
&fp->rx_sge_mbuf_tag); /* returned dma tag */ |
| 15310 |
if (rc != 0) { |
| 15311 |
/* XXX unwind and free previous fastpath allocations */ |
| 15312 |
BLOGE(sc, "Failed to create dma tag for " |
| 15313 |
"'fp %d rx sge mbufs' (%d)\n", |
| 15314 |
i, rc); |
| 15315 |
return (1); |
| 15316 |
} |
| 15317 |
|
| 15318 |
/* create dma maps for the rx sge mbuf clusters */ |
| 15319 |
for (j = 0; j < RX_SGE_TOTAL; j++) { |
| 15320 |
if (bus_dmamap_create(fp->rx_sge_mbuf_tag, |
| 15321 |
BUS_DMA_NOWAIT, |
| 15322 |
&fp->rx_sge_mbuf_chain[j].m_map)) { |
| 15323 |
/* XXX unwind and free previous fastpath allocations */ |
| 15324 |
BLOGE(sc, "Failed to create dma map for " |
| 15325 |
"'fp %d rx sge mbuf %d' (%d)\n", |
| 15326 |
i, j, rc); |
| 15327 |
return (1); |
| 15328 |
} |
| 15329 |
} |
| 15330 |
|
| 15331 |
/* create dma map for the spare rx sge mbuf cluster */ |
| 15332 |
if (bus_dmamap_create(fp->rx_sge_mbuf_tag, |
| 15333 |
BUS_DMA_NOWAIT, |
| 15334 |
&fp->rx_sge_mbuf_spare_map)) { |
| 15335 |
/* XXX unwind and free previous fastpath allocations */ |
| 15336 |
BLOGE(sc, "Failed to create dma map for " |
| 15337 |
"'fp %d spare rx sge mbuf' (%d)\n", |
| 15338 |
i, rc); |
| 15339 |
return (1); |
| 15340 |
} |
| 15341 |
|
| 15342 |
/***************************/ |
| 15343 |
/* FP RX TPA MBUF DMA MAPS */ |
| 15344 |
/***************************/ |
| 15345 |
|
| 15346 |
/* create dma maps for the rx tpa mbuf clusters */ |
| 15347 |
max_agg_queues = MAX_AGG_QS(sc); |
| 15348 |
|
| 15349 |
for (j = 0; j < max_agg_queues; j++) { |
| 15350 |
if (bus_dmamap_create(fp->rx_mbuf_tag, |
| 15351 |
BUS_DMA_NOWAIT, |
| 15352 |
&fp->rx_tpa_info[j].bd.m_map)) { |
| 15353 |
/* XXX unwind and free previous fastpath allocations */ |
| 15354 |
BLOGE(sc, "Failed to create dma map for " |
| 15355 |
"'fp %d rx tpa mbuf %d' (%d)\n", |
| 15356 |
i, j, rc); |
| 15357 |
return (1); |
| 15358 |
} |
| 15359 |
} |
| 15360 |
|
| 15361 |
/* create dma map for the spare rx tpa mbuf cluster */ |
| 15362 |
if (bus_dmamap_create(fp->rx_mbuf_tag, |
| 15363 |
BUS_DMA_NOWAIT, |
| 15364 |
&fp->rx_tpa_info_mbuf_spare_map)) { |
| 15365 |
/* XXX unwind and free previous fastpath allocations */ |
| 15366 |
BLOGE(sc, "Failed to create dma map for " |
| 15367 |
"'fp %d spare rx tpa mbuf' (%d)\n", |
| 15368 |
i, rc); |
| 15369 |
return (1); |
| 15370 |
} |
| 15371 |
|
| 15372 |
bxe_init_sge_ring_bit_mask(fp); |
| 15373 |
} |
| 15374 |
|
| 15375 |
return (0); |
| 15376 |
} |
| 15377 |
|
| 15378 |
/*
 * Free all DMA memory and maps created by bxe_alloc_hsi_mem(): the
 * per-queue fastpath chains and mbuf dma maps/tags, plus the chip-global
 * buffers (firmware decompression buffer, slowpath, event queue, default
 * status block) and finally the parent dma tag itself.  A NULL parent
 * tag means nothing was ever allocated, so the function is a no-op in
 * that case.  Every freed pointer/tag is NULLed so a repeat call is
 * harmless.
 */
static void
bxe_free_hsi_mem(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int max_agg_queues;
    int i, j;

    if (sc->parent_dma_tag == NULL) {
        return; /* assume nothing was allocated */
    }

    for (i = 0; i < sc->num_queues; i++) {
        fp = &sc->fp[i];

        /*******************/
        /* FP STATUS BLOCK */
        /*******************/

        bxe_dma_free(sc, &fp->sb_dma);
        memset(&fp->status_block, 0, sizeof(fp->status_block));

        /******************/
        /* FP TX BD CHAIN */
        /******************/

        bxe_dma_free(sc, &fp->tx_dma);
        fp->tx_chain = NULL;

        /******************/
        /* FP RX BD CHAIN */
        /******************/

        bxe_dma_free(sc, &fp->rx_dma);
        fp->rx_chain = NULL;

        /*******************/
        /* FP RX RCQ CHAIN */
        /*******************/

        bxe_dma_free(sc, &fp->rcq_dma);
        fp->rcq_chain = NULL;

        /*******************/
        /* FP RX SGE CHAIN */
        /*******************/

        bxe_dma_free(sc, &fp->rx_sge_dma);
        fp->rx_sge_chain = NULL;

        /***********************/
        /* FP TX MBUF DMA MAPS */
        /***********************/

        /* maps must be unloaded and destroyed before their tag */
        if (fp->tx_mbuf_tag != NULL) {
            for (j = 0; j < TX_BD_TOTAL; j++) {
                if (fp->tx_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->tx_mbuf_tag,
                                      fp->tx_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->tx_mbuf_tag,
                                       fp->tx_mbuf_chain[j].m_map);
                }
            }

            bus_dma_tag_destroy(fp->tx_mbuf_tag);
            fp->tx_mbuf_tag = NULL;
        }

        /***********************/
        /* FP RX MBUF DMA MAPS */
        /***********************/

        if (fp->rx_mbuf_tag != NULL) {
            for (j = 0; j < RX_BD_TOTAL; j++) {
                if (fp->rx_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->rx_mbuf_tag,
                                      fp->rx_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->rx_mbuf_tag,
                                       fp->rx_mbuf_chain[j].m_map);
                }
            }

            if (fp->rx_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
            }

            /***************************/
            /* FP RX TPA MBUF DMA MAPS */
            /***************************/

            /*
             * The TPA maps were created against the rx mbuf tag (see
             * bxe_alloc_hsi_mem), so they must be torn down before the
             * tag is destroyed below.
             */
            max_agg_queues = MAX_AGG_QS(sc);

            for (j = 0; j < max_agg_queues; j++) {
                if (fp->rx_tpa_info[j].bd.m_map != NULL) {
                    bus_dmamap_unload(fp->rx_mbuf_tag,
                                      fp->rx_tpa_info[j].bd.m_map);
                    bus_dmamap_destroy(fp->rx_mbuf_tag,
                                       fp->rx_tpa_info[j].bd.m_map);
                }
            }

            if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_mbuf_tag,
                                  fp->rx_tpa_info_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_mbuf_tag,
                                   fp->rx_tpa_info_mbuf_spare_map);
            }

            bus_dma_tag_destroy(fp->rx_mbuf_tag);
            fp->rx_mbuf_tag = NULL;
        }

        /***************************/
        /* FP RX SGE MBUF DMA MAPS */
        /***************************/

        if (fp->rx_sge_mbuf_tag != NULL) {
            for (j = 0; j < RX_SGE_TOTAL; j++) {
                if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
                    bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                      fp->rx_sge_mbuf_chain[j].m_map);
                    bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                       fp->rx_sge_mbuf_chain[j].m_map);
                }
            }

            if (fp->rx_sge_mbuf_spare_map != NULL) {
                bus_dmamap_unload(fp->rx_sge_mbuf_tag,
                                  fp->rx_sge_mbuf_spare_map);
                bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
                                   fp->rx_sge_mbuf_spare_map);
            }

            bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
            fp->rx_sge_mbuf_tag = NULL;
        }
    }

    /***************************/
    /* FW DECOMPRESSION BUFFER */
    /***************************/

    bxe_dma_free(sc, &sc->gz_buf_dma);
    sc->gz_buf = NULL;
    free(sc->gz_strm, M_DEVBUF);
    sc->gz_strm = NULL;

    /*******************/
    /* SLOW PATH QUEUE */
    /*******************/

    bxe_dma_free(sc, &sc->spq_dma);
    sc->spq = NULL;

    /*************/
    /* SLOW PATH */
    /*************/

    bxe_dma_free(sc, &sc->sp_dma);
    sc->sp = NULL;

    /***************/
    /* EVENT QUEUE */
    /***************/

    bxe_dma_free(sc, &sc->eq_dma);
    sc->eq = NULL;

    /************************/
    /* DEFAULT STATUS BLOCK */
    /************************/

    bxe_dma_free(sc, &sc->def_sb_dma);
    sc->def_sb = NULL;

    /* the parent tag goes last, after every child allocation is gone */
    bus_dma_tag_destroy(sc->parent_dma_tag);
    sc->parent_dma_tag = NULL;
}
| 15556 |
|
| 15557 |
/* |
| 15558 |
* Previous driver DMAE transaction may have occurred when pre-boot stage |
| 15559 |
* ended and boot began. This would invalidate the addresses of the |
| 15560 |
* transaction, resulting in was-error bit set in the PCI causing all |
| 15561 |
* hw-to-host PCIe transactions to timeout. If this happened we want to clear |
| 15562 |
* the interrupt which detected this from the pglueb and the was-done bit |
| 15563 |
*/ |
| 15564 |
static void |
| 15565 |
bxe_prev_interrupted_dmae(struct bxe_softc *sc) |
| 15566 |
{ |
| 15567 |
uint32_t val; |
| 15568 |
|
| 15569 |
if (!CHIP_IS_E1x(sc)) { |
| 15570 |
val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); |
| 15571 |
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { |
| 15572 |
BLOGD(sc, DBG_LOAD, |
| 15573 |
"Clearing 'was-error' bit that was set in pglueb"); |
| 15574 |
REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); |
| 15575 |
} |
| 15576 |
} |
| 15577 |
} |
| 15578 |
|
| 15579 |
static int |
| 15580 |
bxe_prev_mcp_done(struct bxe_softc *sc) |
| 15581 |
{ |
| 15582 |
uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, |
| 15583 |
DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); |
| 15584 |
if (!rc) { |
| 15585 |
BLOGE(sc, "MCP response failure, aborting\n"); |
| 15586 |
return (-1); |
| 15587 |
} |
| 15588 |
|
| 15589 |
return (0); |
| 15590 |
} |
| 15591 |
|
| 15592 |
static struct bxe_prev_list_node * |
| 15593 |
bxe_prev_path_get_entry(struct bxe_softc *sc) |
| 15594 |
{ |
| 15595 |
struct bxe_prev_list_node *tmp; |
| 15596 |
|
| 15597 |
LIST_FOREACH(tmp, &bxe_prev_list, node) { |
| 15598 |
if ((sc->pcie_bus == tmp->bus) && |
| 15599 |
(sc->pcie_device == tmp->slot) && |
| 15600 |
(SC_PATH(sc) == tmp->path)) { |
| 15601 |
return (tmp); |
| 15602 |
} |
| 15603 |
} |
| 15604 |
|
| 15605 |
return (NULL); |
| 15606 |
} |
| 15607 |
|
| 15608 |
static uint8_t |
| 15609 |
bxe_prev_is_path_marked(struct bxe_softc *sc) |
| 15610 |
{ |
| 15611 |
struct bxe_prev_list_node *tmp; |
| 15612 |
int rc = FALSE; |
| 15613 |
|
| 15614 |
mtx_lock(&bxe_prev_mtx); |
| 15615 |
|
| 15616 |
tmp = bxe_prev_path_get_entry(sc); |
| 15617 |
if (tmp) { |
| 15618 |
if (tmp->aer) { |
| 15619 |
BLOGD(sc, DBG_LOAD, |
| 15620 |
"Path %d/%d/%d was marked by AER\n", |
| 15621 |
sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); |
| 15622 |
} else { |
| 15623 |
rc = TRUE; |
| 15624 |
BLOGD(sc, DBG_LOAD, |
| 15625 |
"Path %d/%d/%d was already cleaned from previous drivers\n", |
| 15626 |
sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); |
| 15627 |
} |
| 15628 |
} |
| 15629 |
|
| 15630 |
mtx_unlock(&bxe_prev_mtx); |
| 15631 |
|
| 15632 |
return (rc); |
| 15633 |
} |
| 15634 |
|
| 15635 |
static int |
| 15636 |
bxe_prev_mark_path(struct bxe_softc *sc, |
| 15637 |
uint8_t after_undi) |
| 15638 |
{ |
| 15639 |
struct bxe_prev_list_node *tmp; |
| 15640 |
|
| 15641 |
mtx_lock(&bxe_prev_mtx); |
| 15642 |
|
| 15643 |
/* Check whether the entry for this path already exists */ |
| 15644 |
tmp = bxe_prev_path_get_entry(sc); |
| 15645 |
if (tmp) { |
| 15646 |
if (!tmp->aer) { |
| 15647 |
BLOGD(sc, DBG_LOAD, |
| 15648 |
"Re-marking AER in path %d/%d/%d\n", |
| 15649 |
sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); |
| 15650 |
} else { |
| 15651 |
BLOGD(sc, DBG_LOAD, |
| 15652 |
"Removing AER indication from path %d/%d/%d\n", |
| 15653 |
sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); |
| 15654 |
tmp->aer = 0; |
| 15655 |
} |
| 15656 |
|
| 15657 |
mtx_unlock(&bxe_prev_mtx); |
| 15658 |
return (0); |
| 15659 |
} |
| 15660 |
|
| 15661 |
mtx_unlock(&bxe_prev_mtx); |
| 15662 |
|
| 15663 |
/* Create an entry for this path and add it */ |
| 15664 |
tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, |
| 15665 |
(M_NOWAIT | M_ZERO)); |
| 15666 |
if (!tmp) { |
| 15667 |
BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); |
| 15668 |
return (-1); |
| 15669 |
} |
| 15670 |
|
| 15671 |
tmp->bus = sc->pcie_bus; |
| 15672 |
tmp->slot = sc->pcie_device; |
| 15673 |
tmp->path = SC_PATH(sc); |
| 15674 |
tmp->aer = 0; |
| 15675 |
tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0; |
| 15676 |
|
| 15677 |
mtx_lock(&bxe_prev_mtx); |
| 15678 |
|
| 15679 |
BLOGD(sc, DBG_LOAD, |
| 15680 |
"Marked path %d/%d/%d - finished previous unload\n", |
| 15681 |
sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); |
| 15682 |
LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); |
| 15683 |
|
| 15684 |
mtx_unlock(&bxe_prev_mtx); |
| 15685 |
|
| 15686 |
return (0); |
| 15687 |
} |
| 15688 |
|
| 15689 |
static int |
| 15690 |
bxe_do_flr(struct bxe_softc *sc) |
| 15691 |
{ |
| 15692 |
int i; |
| 15693 |
|
| 15694 |
/* only E2 and onwards support FLR */ |
| 15695 |
if (CHIP_IS_E1x(sc)) { |
| 15696 |
BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); |
| 15697 |
return (-1); |
| 15698 |
} |
| 15699 |
|
| 15700 |
/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ |
| 15701 |
if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { |
| 15702 |
BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", |
| 15703 |
sc->devinfo.bc_ver); |
| 15704 |
return (-1); |
| 15705 |
} |
| 15706 |
|
| 15707 |
/* Wait for Transaction Pending bit clean */ |
| 15708 |
for (i = 0; i < 4; i++) { |
| 15709 |
if (i) { |
| 15710 |
DELAY(((1 << (i - 1)) * 100) * 1000); |
| 15711 |
} |
| 15712 |
|
| 15713 |
if (!bxe_is_pcie_pending(sc)) { |
| 15714 |
goto clear; |
| 15715 |
} |
| 15716 |
} |
| 15717 |
|
| 15718 |
BLOGE(sc, "PCIE transaction is not cleared, " |
| 15719 |
"proceeding with reset anyway\n"); |
| 15720 |
|
| 15721 |
clear: |
| 15722 |
|
| 15723 |
BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); |
| 15724 |
bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); |
| 15725 |
|
| 15726 |
return (0); |
| 15727 |
} |
| 15728 |
|
| 15729 |
/*
 * Saved MAC register addresses/values recorded by
 * bxe_prev_unload_close_mac() so bxe_prev_unload_common() can restore
 * them after the common reset.  An address of 0 means that MAC was not
 * touched and needs no restore.
 */
struct bxe_mac_vals {
    uint32_t xmac_addr;
    uint32_t xmac_val;
    uint32_t emac_addr;
    uint32_t emac_val;
    uint32_t umac_addr;
    uint32_t umac_val;
    uint32_t bmac_addr;
    uint32_t bmac_val[2]; /* BMAC control is saved as two 32-bit words */
};
| 15739 |
|
| 15740 |
/*
 * Stop Rx on whichever MAC block a previous driver left enabled so the
 * BRB can drain before the common reset.  Every register that gets
 * modified has its address and original value recorded in 'vals' for
 * later restore (an address of 0 marks an untouched entry).  E1x/E2
 * chips use BMAC+EMAC; E3 uses XMAC+UMAC.  When any MAC was stopped,
 * a 20ms delay lets in-flight traffic settle.
 */
static void
bxe_prev_unload_close_mac(struct bxe_softc *sc,
                          struct bxe_mac_vals *vals)
{
    uint32_t val, base_addr, offset, mask, reset_reg;
    uint8_t mac_stopped = FALSE;
    uint8_t port = SC_PORT(sc);
    uint32_t wb_data[2];

    /* reset addresses as they also mark which values were changed */
    vals->bmac_addr = 0;
    vals->umac_addr = 0;
    vals->xmac_addr = 0;
    vals->emac_addr = 0;

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2);

    if (!CHIP_IS_E3(sc)) {
        /* E1x/E2: BMAC (only if out of reset and enabled) + EMAC */
        val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
        mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
        if ((mask & reset_reg) && val) {
            BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n");
            base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM
                                    : NIG_REG_INGRESS_BMAC0_MEM;
            offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL
                                    : BIGMAC_REGISTER_BMAC_CONTROL;

            /*
             * use rd/wr since we cannot use dmae. This is safe
             * since MCP won't access the bus due to the request
             * to unload, and no function on the path can be
             * loaded at this time.
             */
            wb_data[0] = REG_RD(sc, base_addr + offset);
            wb_data[1] = REG_RD(sc, base_addr + offset + 0x4);
            vals->bmac_addr = base_addr + offset;
            vals->bmac_val[0] = wb_data[0];
            vals->bmac_val[1] = wb_data[1];
            /* clear only the Rx-enable bit, keep the rest intact */
            wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE;
            REG_WR(sc, vals->bmac_addr, wb_data[0]);
            REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
        }

        BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n");
        vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
        vals->emac_val = REG_RD(sc, vals->emac_addr);
        REG_WR(sc, vals->emac_addr, 0);
        mac_stopped = TRUE;
    } else {
        /* E3: XMAC + UMAC */
        if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
            BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
            /*
             * Pulse bit 1 of PFC_CTRL_HI low then high before disabling
             * the XMAC.  NOTE(review): presumably this resets the PFC
             * sub-block -- confirm against the XMAC register reference.
             */
            val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI);
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1));
            REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1));
            vals->xmac_addr = base_addr + XMAC_REG_CTRL;
            vals->xmac_val = REG_RD(sc, vals->xmac_addr);
            REG_WR(sc, vals->xmac_addr, 0);
            mac_stopped = TRUE;
        }

        mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
        if (mask & reset_reg) {
            BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n");
            base_addr = SC_PORT(sc) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
            vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
            vals->umac_val = REG_RD(sc, vals->umac_addr);
            REG_WR(sc, vals->umac_addr, 0);
            mac_stopped = TRUE;
        }
    }

    if (mac_stopped) {
        /* allow in-flight Rx traffic to drain out of the stopped MAC */
        DELAY(20000);
    }
}
| 15816 |
|
| 15817 |
/* per-port location of the UNDI Rx producer word in TSTORM internal memory */
#define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
/* the producer word packs the RCQ producer in bits 15:0 ... */
#define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff)
/* ... and the BD producer in bits 31:16 */
#define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
/* re-pack the two 16-bit producers into one 32-bit word */
#define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
| 15821 |
|
| 15822 |
static void |
| 15823 |
bxe_prev_unload_undi_inc(struct bxe_softc *sc, |
| 15824 |
uint8_t port, |
| 15825 |
uint8_t inc) |
| 15826 |
{ |
| 15827 |
uint16_t rcq, bd; |
| 15828 |
uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); |
| 15829 |
|
| 15830 |
rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; |
| 15831 |
bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; |
| 15832 |
|
| 15833 |
tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); |
| 15834 |
REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); |
| 15835 |
|
| 15836 |
BLOGD(sc, DBG_LOAD, |
| 15837 |
"UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", |
| 15838 |
port, bd, rcq); |
| 15839 |
} |
| 15840 |
|
| 15841 |
/*
 * Handle the 'common' unload answer from the MCP: this function cleans
 * the whole path left behind by a previous driver (including a legacy
 * UNDI/pre-boot driver).  It closes the MAC Rx path and LLH filters,
 * drains the BRB (nudging the UNDI producers if one was detected),
 * performs the common reset, restores the saved MAC registers, marks
 * the path as cleaned, and finishes the MCP handshake.  Returns the
 * result of bxe_prev_mcp_done(), or the mark-path error on failure.
 */
static int
bxe_prev_unload_common(struct bxe_softc *sc)
{
    uint32_t reset_reg, tmp_reg = 0, rc;
    uint8_t prev_undi = FALSE;
    struct bxe_mac_vals mac_vals;
    uint32_t timer_count = 1000;
    uint32_t prev_brb;

    /*
     * It is possible a previous function received 'common' answer,
     * but hasn't loaded yet, therefore creating a scenario of
     * multiple functions receiving 'common' on the same path.
     */
    BLOGD(sc, DBG_LOAD, "Common unload Flow\n");

    memset(&mac_vals, 0, sizeof(mac_vals));

    /* if another function already cleaned this path, just ack the MCP */
    if (bxe_prev_is_path_marked(sc)) {
        return (bxe_prev_mcp_done(sc));
    }

    reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1);

    /* Reset should be performed after BRB is emptied */
    if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
        /* Close the MAC Rx to prevent BRB from filling up */
        bxe_prev_unload_close_mac(sc, &mac_vals);

        /* close LLH filters towards the BRB */
        elink_set_rx_filter(&sc->link_params, 0);

        /*
         * Check if the UNDI driver was previously loaded.
         * UNDI driver initializes CID offset for normal bell to 0x7
         */
        if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
            tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
            if (tmp_reg == 0x7) {
                BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n");
                prev_undi = TRUE;
                /* clear the UNDI indication */
                REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
                /* clear possible idle check errors */
                REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0);
            }
        }

        /* wait until BRB is empty */
        tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
        while (timer_count) {
            prev_brb = tmp_reg;

            tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS);
            if (!tmp_reg) {
                break;
            }

            BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg);

            /* reset timer as long as BRB actually gets emptied */
            if (prev_brb > tmp_reg) {
                timer_count = 1000;
            } else {
                timer_count--;
            }

            /* If UNDI resides in memory, manually increment it */
            if (prev_undi) {
                bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1);
            }

            DELAY(10);
        }

        if (!timer_count) {
            /* best-effort: log and proceed with the reset anyway */
            BLOGE(sc, "Failed to empty BRB\n");
        }
    }

    /* No packets are in the pipeline, path is ready for reset */
    bxe_reset_common(sc);

    /* restore the MAC registers saved by bxe_prev_unload_close_mac() */
    if (mac_vals.xmac_addr) {
        REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val);
    }
    if (mac_vals.umac_addr) {
        REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val);
    }
    if (mac_vals.emac_addr) {
        REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val);
    }
    if (mac_vals.bmac_addr) {
        REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
        REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
    }

    rc = bxe_prev_mark_path(sc, prev_undi);
    if (rc) {
        /* still ack the MCP so the handshake is not left hanging */
        bxe_prev_mcp_done(sc);
        return (rc);
    }

    return (bxe_prev_mcp_done(sc));
}
| 15946 |
|
| 15947 |
static int |
| 15948 |
bxe_prev_unload_uncommon(struct bxe_softc *sc) |
| 15949 |
{ |
| 15950 |
int rc; |
| 15951 |
|
| 15952 |
BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); |
| 15953 |
|
| 15954 |
/* Test if previous unload process was already finished for this path */ |
| 15955 |
if (bxe_prev_is_path_marked(sc)) { |
| 15956 |
return (bxe_prev_mcp_done(sc)); |
| 15957 |
} |
| 15958 |
|
| 15959 |
BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); |
| 15960 |
|
| 15961 |
/* |
| 15962 |
* If function has FLR capabilities, and existing FW version matches |
| 15963 |
* the one required, then FLR will be sufficient to clean any residue |
| 15964 |
* left by previous driver |
| 15965 |
*/ |
| 15966 |
rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); |
| 15967 |
if (!rc) { |
| 15968 |
/* fw version is good */ |
| 15969 |
BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); |
| 15970 |
rc = bxe_do_flr(sc); |
| 15971 |
} |
| 15972 |
|
| 15973 |
if (!rc) { |
| 15974 |
/* FLR was performed */ |
| 15975 |
BLOGD(sc, DBG_LOAD, "FLR successful\n"); |
| 15976 |
return (0); |
| 15977 |
} |
| 15978 |
|
| 15979 |
BLOGD(sc, DBG_LOAD, "Could not FLR\n"); |
| 15980 |
|
| 15981 |
/* Close the MCP request, return failure*/ |
| 15982 |
rc = bxe_prev_mcp_done(sc); |
| 15983 |
if (!rc) { |
| 15984 |
rc = BXE_PREV_WAIT_NEEDED; |
| 15985 |
} |
| 15986 |
|
| 15987 |
return (rc); |
| 15988 |
} |
| 15989 |
|
| 15990 |
static int |
| 15991 |
bxe_prev_unload(struct bxe_softc *sc) |
| 15992 |
{ |
| 15993 |
int time_counter = 10; |
| 15994 |
uint32_t fw, hw_lock_reg, hw_lock_val; |
| 15995 |
uint32_t rc = 0; |
| 15996 |
|
| 15997 |
/* |
| 15998 |
* Clear HW from errors which may have resulted from an interrupted |
| 15999 |
* DMAE transaction. |
| 16000 |
*/ |
| 16001 |
bxe_prev_interrupted_dmae(sc); |
| 16002 |
|
| 16003 |
/* Release previously held locks */ |
| 16004 |
hw_lock_reg = |
| 16005 |
(SC_FUNC(sc) <= 5) ? |
| 16006 |
(MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : |
| 16007 |
(MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); |
| 16008 |
|
| 16009 |
hw_lock_val = (REG_RD(sc, hw_lock_reg)); |
| 16010 |
if (hw_lock_val) { |
| 16011 |
if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { |
| 16012 |
BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); |
| 16013 |
REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, |
| 16014 |
(MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); |
| 16015 |
} |
| 16016 |
BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); |
| 16017 |
REG_WR(sc, hw_lock_reg, 0xffffffff); |
| 16018 |
} else { |
| 16019 |
BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); |
| 16020 |
} |
| 16021 |
|
| 16022 |
if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { |
| 16023 |
BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); |
| 16024 |
REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); |
| 16025 |
} |
| 16026 |
|
| 16027 |
do { |
| 16028 |
/* Lock MCP using an unload request */ |
| 16029 |
fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); |
| 16030 |
if (!fw) { |
| 16031 |
BLOGE(sc, "MCP response failure, aborting\n"); |
| 16032 |
rc = -1; |
| 16033 |
break; |
| 16034 |
} |
| 16035 |
|
| 16036 |
if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { |
| 16037 |
rc = bxe_prev_unload_common(sc); |
| 16038 |
break; |
| 16039 |
} |
| 16040 |
|
| 16041 |
/* non-common reply from MCP night require looping */ |
| 16042 |
rc = bxe_prev_unload_uncommon(sc); |
| 16043 |
if (rc != BXE_PREV_WAIT_NEEDED) { |
| 16044 |
break; |
| 16045 |
} |
| 16046 |
|
| 16047 |
DELAY(20000); |
| 16048 |
} while (--time_counter); |
| 16049 |
|
| 16050 |
if (!time_counter || rc) { |
| 16051 |
BLOGE(sc, "Failed to unload previous driver!\n"); |
| 16052 |
rc = -1; |
| 16053 |
} |
| 16054 |
|
| 16055 |
return (rc); |
| 16056 |
} |
| 16057 |
|
| 16058 |
void |
| 16059 |
bxe_dcbx_set_state(struct bxe_softc *sc, |
| 16060 |
uint8_t dcb_on, |
| 16061 |
uint32_t dcbx_enabled) |
| 16062 |
{ |
| 16063 |
if (!CHIP_IS_E1x(sc)) { |
| 16064 |
sc->dcb_state = dcb_on; |
| 16065 |
sc->dcbx_enabled = dcbx_enabled; |
| 16066 |
} else { |
| 16067 |
sc->dcb_state = FALSE; |
| 16068 |
sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; |
| 16069 |
} |
| 16070 |
BLOGD(sc, DBG_LOAD, |
| 16071 |
"DCB state [%s:%s]\n", |
| 16072 |
dcb_on ? "ON" : "OFF", |
| 16073 |
(dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : |
| 16074 |
(dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : |
| 16075 |
(dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? |
| 16076 |
"on-chip with negotiation" : "invalid"); |
| 16077 |
} |
| 16078 |
|
| 16079 |
/* must be called after sriov-enable */ |
| 16080 |
static int |
| 16081 |
bxe_set_qm_cid_count(struct bxe_softc *sc) |
| 16082 |
{ |
| 16083 |
int cid_count = BXE_L2_MAX_CID(sc); |
| 16084 |
|
| 16085 |
if (IS_SRIOV(sc)) { |
| 16086 |
cid_count += BXE_VF_CIDS; |
| 16087 |
} |
| 16088 |
|
| 16089 |
if (CNIC_SUPPORT(sc)) { |
| 16090 |
cid_count += CNIC_CID_MAX; |
| 16091 |
} |
| 16092 |
|
| 16093 |
return (roundup(cid_count, QM_CID_ROUND)); |
| 16094 |
} |
| 16095 |
|
| 16096 |
static void |
| 16097 |
bxe_init_multi_cos(struct bxe_softc *sc) |
| 16098 |
{ |
| 16099 |
int pri, cos; |
| 16100 |
|
| 16101 |
uint32_t pri_map = 0; /* XXX change to user config */ |
| 16102 |
|
| 16103 |
for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { |
| 16104 |
cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); |
| 16105 |
if (cos < sc->max_cos) { |
| 16106 |
sc->prio_to_cos[pri] = cos; |
| 16107 |
} else { |
| 16108 |
BLOGW(sc, "Invalid COS %d for priority %d " |
| 16109 |
"(max COS is %d), setting to 0\n", |
| 16110 |
cos, pri, (sc->max_cos - 1)); |
| 16111 |
sc->prio_to_cos[pri] = 0; |
| 16112 |
} |
| 16113 |
} |
| 16114 |
} |
| 16115 |
|
| 16116 |
static int |
| 16117 |
bxe_sysctl_state(SYSCTL_HANDLER_ARGS) |
| 16118 |
{ |
| 16119 |
struct bxe_softc *sc; |
| 16120 |
int error, result; |
| 16121 |
|
| 16122 |
result = 0; |
| 16123 |
error = sysctl_handle_int(oidp, &result, 0, req); |
| 16124 |
|
| 16125 |
if (error || !req->newptr) { |
| 16126 |
return (error); |
| 16127 |
} |
| 16128 |
|
| 16129 |
if (result == 1) { |
| 16130 |
sc = (struct bxe_softc *)arg1; |
| 16131 |
BLOGI(sc, "... dumping driver state ...\n"); |
| 16132 |
/* XXX */ |
| 16133 |
} |
| 16134 |
|
| 16135 |
return (error); |
| 16136 |
} |
| 16137 |
|
| 16138 |
static int |
| 16139 |
bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) |
| 16140 |
{ |
| 16141 |
struct bxe_softc *sc = (struct bxe_softc *)arg1; |
| 16142 |
uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; |
| 16143 |
uint32_t *offset; |
| 16144 |
uint64_t value = 0; |
| 16145 |
int index = (int)arg2; |
| 16146 |
|
| 16147 |
if (index >= BXE_NUM_ETH_STATS) { |
| 16148 |
BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); |
| 16149 |
return (-1); |
| 16150 |
} |
| 16151 |
|
| 16152 |
offset = (eth_stats + bxe_eth_stats_arr[index].offset); |
| 16153 |
|
| 16154 |
switch (bxe_eth_stats_arr[index].size) { |
| 16155 |
case 4: |
| 16156 |
value = (uint64_t)*offset; |
| 16157 |
break; |
| 16158 |
case 8: |
| 16159 |
value = HILO_U64(*offset, *(offset + 1)); |
| 16160 |
break; |
| 16161 |
default: |
| 16162 |
BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", |
| 16163 |
index, bxe_eth_stats_arr[index].size); |
| 16164 |
return (-1); |
| 16165 |
} |
| 16166 |
|
| 16167 |
return (sysctl_handle_64(oidp, &value, 0, req)); |
| 16168 |
} |
| 16169 |
|
| 16170 |
static int |
| 16171 |
bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) |
| 16172 |
{ |
| 16173 |
struct bxe_softc *sc = (struct bxe_softc *)arg1; |
| 16174 |
uint32_t *eth_stats; |
| 16175 |
uint32_t *offset; |
| 16176 |
uint64_t value = 0; |
| 16177 |
uint32_t q_stat = (uint32_t)arg2; |
| 16178 |
uint32_t fp_index = ((q_stat >> 16) & 0xffff); |
| 16179 |
uint32_t index = (q_stat & 0xffff); |
| 16180 |
|
| 16181 |
eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; |
| 16182 |
|
| 16183 |
if (index >= BXE_NUM_ETH_Q_STATS) { |
| 16184 |
BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); |
| 16185 |
return (-1); |
| 16186 |
} |
| 16187 |
|
| 16188 |
offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); |
| 16189 |
|
| 16190 |
switch (bxe_eth_q_stats_arr[index].size) { |
| 16191 |
case 4: |
| 16192 |
value = (uint64_t)*offset; |
| 16193 |
break; |
| 16194 |
case 8: |
| 16195 |
value = HILO_U64(*offset, *(offset + 1)); |
| 16196 |
break; |
| 16197 |
default: |
| 16198 |
BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", |
| 16199 |
index, bxe_eth_q_stats_arr[index].size); |
| 16200 |
return (-1); |
| 16201 |
} |
| 16202 |
|
| 16203 |
return (sysctl_handle_64(oidp, &value, 0, req)); |
| 16204 |
} |
| 16205 |
|
| 16206 |
/*
 * Register the driver's sysctl tree under dev.bxe.N: version strings,
 * link/mode info, tunables (debug, rx_budget), the "state" dump trigger,
 * device-wide stats, and a per-queue subtree of queue stats.
 * Called once from attach; the sysctl context frees everything on detach.
 */
static void
bxe_add_sysctls(struct bxe_softc *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *queue_top, *queue;
    struct sysctl_oid_list *queue_top_children, *queue_children;
    char queue_num_buf[32];
    uint32_t q_stat;
    int i, j;

    ctx = device_get_sysctl_ctx(sc->dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version",
                      CTLFLAG_RD, BXE_DRIVER_VERSION, 0,
                      "version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version",
                      CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0,
                      "bootcode version");

    /* build the firmware version string once; sysctl exports it by reference */
    snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
             BCM_5710_FW_MAJOR_VERSION,
             BCM_5710_FW_MINOR_VERSION,
             BCM_5710_FW_REVISION_VERSION,
             BCM_5710_FW_ENGINEERING_VERSION);
    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version",
                      CTLFLAG_RD, &sc->fw_ver_str, 0,
                      "firmware version");

    snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
        ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION)     ? "Single"  :
         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD)   ? "MF-SD"   :
         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI)   ? "MF-SI"   :
         (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
                                                                "Unknown"));
    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode",
                      CTLFLAG_RD, &sc->mf_mode_str, 0,
                      "multifunction mode");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics",
                    CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
                    "multifunction vnics per port");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr",
                      CTLFLAG_RD, &sc->mac_addr_str, 0,
                      "mac address");

    /* pcie_link_speed is the PCIe encoding: 1=2.5GT/s, 2=5GT/s, 4=8GT/s */
    snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
        ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
         (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
         (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
                                              "???GT/s"),
        sc->devinfo.pcie_link_width);
    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link",
                      CTLFLAG_RD, &sc->pci_link_str, 0,
                      "pci link status");

    /* writable tunables, seeded from the loader/module defaults */
    sc->debug = bxe_debug;
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug",
                    CTLFLAG_RW, &sc->debug, 0,
                    "debug logging mode");

    sc->rx_budget = bxe_rx_budget;
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget",
                    CTLFLAG_RW, &sc->rx_budget, 0,
                    "rx processing budget");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state",
                    CTLTYPE_UINT | CTLFLAG_RW, sc, 0,
                    bxe_sysctl_state, "IU", "dump driver state");

    /* one proc node per device-wide stat; arg2 is the stat index */
    for (i = 0; i < BXE_NUM_ETH_STATS; i++) {
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
                        bxe_eth_stats_arr[i].string,
                        CTLTYPE_U64 | CTLFLAG_RD, sc, i,
                        bxe_sysctl_eth_stat, "LU",
                        bxe_eth_stats_arr[i].string);
    }

    /* add a new parent node for all queues "dev.bxe.#.queue" */
    queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue",
                                CTLFLAG_RD, NULL, "queue");
    queue_top_children = SYSCTL_CHILDREN(queue_top);

    for (i = 0; i < sc->num_queues; i++) {
        /* add a new parent node for a single queue "dev.bxe.#.queue.#" */
        snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i);
        queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
                                queue_num_buf, CTLFLAG_RD, NULL,
                                "single queue");
        queue_children = SYSCTL_CHILDREN(queue);

        /* arg2 packs queue index (hi 16 bits) and stat index (lo 16 bits) */
        for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) {
            q_stat = ((i << 16) | j);
            SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO,
                            bxe_eth_q_stats_arr[j].string,
                            CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat,
                            bxe_sysctl_eth_q_stat, "LU",
                            bxe_eth_q_stats_arr[j].string);
        }
    }
}
| 16310 |
|
| 16311 |
/* |
| 16312 |
* Device attach function. |
| 16313 |
* |
| 16314 |
* Allocates device resources, performs secondary chip identification, and |
| 16315 |
* initializes driver instance variables. This function is called from driver |
| 16316 |
* load after a successful probe. |
| 16317 |
* |
| 16318 |
* Returns: |
| 16319 |
* 0 = Success, >0 = Failure |
| 16320 |
*/ |
| 16321 |
static int |
| 16322 |
bxe_attach(device_t dev) |
| 16323 |
{ |
| 16324 |
struct bxe_softc *sc; |
| 16325 |
|
| 16326 |
sc = device_get_softc(dev); |
| 16327 |
|
| 16328 |
BLOGD(sc, DBG_LOAD, "Starting attach...\n"); |
| 16329 |
|
| 16330 |
sc->state = BXE_STATE_CLOSED; |
| 16331 |
|
| 16332 |
sc->dev = dev; |
| 16333 |
sc->unit = device_get_unit(dev); |
| 16334 |
|
| 16335 |
BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); |
| 16336 |
|
| 16337 |
sc->pcie_bus = pci_get_bus(dev); |
| 16338 |
sc->pcie_device = pci_get_slot(dev); |
| 16339 |
sc->pcie_func = pci_get_function(dev); |
| 16340 |
|
| 16341 |
/* enable bus master capability */ |
| 16342 |
pci_enable_busmaster(dev); |
| 16343 |
|
| 16344 |
/* get the BARs */ |
| 16345 |
if (bxe_allocate_bars(sc) != 0) { |
| 16346 |
return (ENXIO); |
| 16347 |
} |
| 16348 |
|
| 16349 |
/* initialize the mutexes */ |
| 16350 |
bxe_init_mutexes(sc); |
| 16351 |
|
| 16352 |
/* prepare the periodic callout */ |
| 16353 |
callout_init(&sc->periodic_callout, 0); |
| 16354 |
|
| 16355 |
/* prepare the chip taskqueue */ |
| 16356 |
sc->chip_tq_flags = CHIP_TQ_NONE; |
| 16357 |
snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), |
| 16358 |
"bxe%d_chip_tq", sc->unit); |
| 16359 |
TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); |
| 16360 |
sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, |
| 16361 |
taskqueue_thread_enqueue, |
| 16362 |
&sc->chip_tq); |
| 16363 |
taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ |
| 16364 |
"%s", sc->chip_tq_name); |
| 16365 |
|
| 16366 |
/* get device info and set params */ |
| 16367 |
if (bxe_get_device_info(sc) != 0) { |
| 16368 |
BLOGE(sc, "getting device info\n"); |
| 16369 |
bxe_deallocate_bars(sc); |
| 16370 |
pci_disable_busmaster(dev); |
| 16371 |
return (ENXIO); |
| 16372 |
} |
| 16373 |
|
| 16374 |
/* get final misc params */ |
| 16375 |
bxe_get_params(sc); |
| 16376 |
|
| 16377 |
/* set the default MTU (changed via ifconfig) */ |
| 16378 |
sc->mtu = ETHERMTU; |
| 16379 |
|
| 16380 |
bxe_set_modes_bitmap(sc); |
| 16381 |
|
| 16382 |
/* XXX |
| 16383 |
* If in AFEX mode and the function is configured for FCoE |
| 16384 |
* then bail... no L2 allowed. |
| 16385 |
*/ |
| 16386 |
|
| 16387 |
/* get phy settings from shmem and 'and' against admin settings */ |
| 16388 |
bxe_get_phy_info(sc); |
| 16389 |
|
| 16390 |
/* initialize the FreeBSD ifnet interface */ |
| 16391 |
if (bxe_init_ifnet(sc) != 0) { |
| 16392 |
bxe_release_mutexes(sc); |
| 16393 |
bxe_deallocate_bars(sc); |
| 16394 |
pci_disable_busmaster(dev); |
| 16395 |
return (ENXIO); |
| 16396 |
} |
| 16397 |
|
| 16398 |
/* allocate device interrupts */ |
| 16399 |
if (bxe_interrupt_alloc(sc) != 0) { |
| 16400 |
if (sc->ifp != NULL) { |
| 16401 |
ether_ifdetach_drv(sc->ifp); |
| 16402 |
} |
| 16403 |
ifmedia_removeall(&sc->ifmedia); |
| 16404 |
bxe_release_mutexes(sc); |
| 16405 |
bxe_deallocate_bars(sc); |
| 16406 |
pci_disable_busmaster(dev); |
| 16407 |
return (ENXIO); |
| 16408 |
} |
| 16409 |
|
| 16410 |
/* allocate ilt */ |
| 16411 |
if (bxe_alloc_ilt_mem(sc) != 0) { |
| 16412 |
bxe_interrupt_free(sc); |
| 16413 |
if (sc->ifp != NULL) { |
| 16414 |
ether_ifdetach_drv(sc->ifp); |
| 16415 |
} |
| 16416 |
ifmedia_removeall(&sc->ifmedia); |
| 16417 |
bxe_release_mutexes(sc); |
| 16418 |
bxe_deallocate_bars(sc); |
| 16419 |
pci_disable_busmaster(dev); |
| 16420 |
return (ENXIO); |
| 16421 |
} |
| 16422 |
|
| 16423 |
/* allocate the host hardware/software hsi structures */ |
| 16424 |
if (bxe_alloc_hsi_mem(sc) != 0) { |
| 16425 |
bxe_free_ilt_mem(sc); |
| 16426 |
bxe_interrupt_free(sc); |
| 16427 |
if (sc->ifp != NULL) { |
| 16428 |
ether_ifdetach_drv(sc->ifp); |
| 16429 |
} |
| 16430 |
ifmedia_removeall(&sc->ifmedia); |
| 16431 |
bxe_release_mutexes(sc); |
| 16432 |
bxe_deallocate_bars(sc); |
| 16433 |
pci_disable_busmaster(dev); |
| 16434 |
return (ENXIO); |
| 16435 |
} |
| 16436 |
|
| 16437 |
/* need to reset chip if UNDI was active */ |
| 16438 |
if (IS_PF(sc) && !BXE_NOMCP(sc)) { |
| 16439 |
/* init fw_seq */ |
| 16440 |
sc->fw_seq = |
| 16441 |
(SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & |
| 16442 |
DRV_MSG_SEQ_NUMBER_MASK); |
| 16443 |
BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); |
| 16444 |
bxe_prev_unload(sc); |
| 16445 |
} |
| 16446 |
|
| 16447 |
#if 1 |
| 16448 |
/* XXX */ |
| 16449 |
bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); |
| 16450 |
#else |
| 16451 |
if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && |
| 16452 |
SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && |
| 16453 |
SHMEM2_RD(sc, dcbx_lldp_params_offset) && |
| 16454 |
SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { |
| 16455 |
bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); |
| 16456 |
bxe_dcbx_init_params(sc); |
| 16457 |
} else { |
| 16458 |
bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); |
| 16459 |
} |
| 16460 |
#endif |
| 16461 |
|
| 16462 |
/* calculate qm_cid_count */ |
| 16463 |
sc->qm_cid_count = bxe_set_qm_cid_count(sc); |
| 16464 |
BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); |
| 16465 |
|
| 16466 |
sc->max_cos = 1; |
| 16467 |
bxe_init_multi_cos(sc); |
| 16468 |
|
| 16469 |
bxe_add_sysctls(sc); |
| 16470 |
|
| 16471 |
return (0); |
| 16472 |
} |
| 16473 |
|
| 16474 |
/* |
| 16475 |
* Device detach function. |
| 16476 |
* |
| 16477 |
* Stops the controller, resets the controller, and releases resources. |
| 16478 |
* |
| 16479 |
* Returns: |
| 16480 |
* 0 = Success, >0 = Failure |
| 16481 |
*/ |
| 16482 |
static int
bxe_detach(device_t dev)
{
    struct bxe_softc *sc;
    if_t ifp;

    sc = device_get_softc(dev);

    BLOGD(sc, DBG_LOAD, "Starting detach...\n");

    ifp = sc->ifp;
    /* refuse to detach while VLAN interfaces are stacked on top of us */
    if (ifp != NULL && if_vlantrunkinuse(ifp)) {
        BLOGE(sc, "Cannot detach while VLANs are in use.\n");
        return(EBUSY);
    }

    /* stop the periodic callout */
    bxe_periodic_stop(sc);

    /* stop the chip taskqueue (drain any in-flight task before freeing) */
    atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
    if (sc->chip_tq) {
        taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
        taskqueue_free(sc->chip_tq);
        sc->chip_tq = NULL;
    }

    /* stop and reset the controller if it was open */
    if (sc->state != BXE_STATE_CLOSED) {
        BXE_CORE_LOCK(sc);
        bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE);
        BXE_CORE_UNLOCK(sc);
    }

    /* release the network interface */
    if (ifp != NULL) {
        ether_ifdetach_drv(ifp);
    }
    ifmedia_removeall(&sc->ifmedia);

    /* XXX do the following based on driver state... */

    /* free the host hardware/software hsi structures */
    bxe_free_hsi_mem(sc);

    /* free ilt */
    bxe_free_ilt_mem(sc);

    /* release the interrupts */
    bxe_interrupt_free(sc);

    /* Release the mutexes*/
    bxe_release_mutexes(sc);

    /* Release the PCIe BAR mapped memory */
    bxe_deallocate_bars(sc);

    /* Release the FreeBSD interface. */
    if (sc->ifp != NULL) {
        if_free_drv(sc->ifp);
    }

    pci_disable_busmaster(dev);

    return (0);
}
| 16548 |
|
| 16549 |
/* |
| 16550 |
* Device shutdown function. |
| 16551 |
* |
| 16552 |
* Stops and resets the controller. |
| 16553 |
* |
| 16554 |
* Returns: |
| 16555 |
 *   0 = Success (the handler always returns 0)
| 16556 |
*/ |
| 16557 |
static int |
| 16558 |
bxe_shutdown(device_t dev) |
| 16559 |
{ |
| 16560 |
struct bxe_softc *sc; |
| 16561 |
|
| 16562 |
sc = device_get_softc(dev); |
| 16563 |
|
| 16564 |
BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); |
| 16565 |
|
| 16566 |
/* stop the periodic callout */ |
| 16567 |
bxe_periodic_stop(sc); |
| 16568 |
|
| 16569 |
BXE_CORE_LOCK(sc); |
| 16570 |
bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); |
| 16571 |
BXE_CORE_UNLOCK(sc); |
| 16572 |
|
| 16573 |
return (0); |
| 16574 |
} |
| 16575 |
|
| 16576 |
void |
| 16577 |
bxe_igu_ack_sb(struct bxe_softc *sc, |
| 16578 |
uint8_t igu_sb_id, |
| 16579 |
uint8_t segment, |
| 16580 |
uint16_t index, |
| 16581 |
uint8_t op, |
| 16582 |
uint8_t update) |
| 16583 |
{ |
| 16584 |
uint32_t igu_addr = sc->igu_base_addr; |
| 16585 |
igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; |
| 16586 |
bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); |
| 16587 |
} |
| 16588 |
|
| 16589 |
/*
 * Issue an IGU status-block cleanup command over the GRC interface and
 * poll for its completion bit. Used when tearing down a status block
 * for either the PF or a VF (func/is_pf select the FID encoding).
 * Polls up to 100 * 20ms; failure to complete is logged but not fatal.
 */
static void
bxe_igu_clear_sb_gen(struct bxe_softc *sc,
                     uint8_t          func,
                     uint8_t          idu_sb_id,
                     uint8_t          is_pf)
{
    uint32_t data, ctl, cnt = 100;
    uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
    uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
    /* completion is reported as one bit per SB in a 32-bit ack register */
    uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
    uint32_t sb_bit = 1 << (idu_sb_id%32);
    uint32_t func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
    uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

    /* Not supported in BC mode */
    if (CHIP_INT_MODE_IS_BC(sc)) {
        return;
    }

    data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup <<
             IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
            IGU_REGULAR_CLEANUP_SET |
            IGU_REGULAR_BCLEANUP);

    ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) |
           (func_encode << IGU_CTRL_REG_FID_SHIFT) |
           (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));

    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
          data, igu_addr_data);
    REG_WR(sc, igu_addr_data, data);

    /* the data write must be visible to the device before the ctl write */
    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    mb();

    BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
          ctl, igu_addr_ctl);
    REG_WR(sc, igu_addr_ctl, ctl);

    bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
                      BUS_SPACE_BARRIER_WRITE);
    mb();

    /* wait for clean up to finish */
    while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
        DELAY(20000);
    }

    if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
        BLOGD(sc, DBG_LOAD,
              "Unable to finish IGU cleanup: "
              "idu_sb_id %d offset %d bit %d (cnt %d)\n",
              idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
    }
}
| 16645 |
|
| 16646 |
/* Clear a status block for our own function (PF variant of the above). */
static void
bxe_igu_clear_sb(struct bxe_softc *sc,
                 uint8_t          idu_sb_id)
{
    bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/);
}
| 16652 |
|
| 16653 |
|
| 16654 |
|
| 16655 |
|
| 16656 |
|
| 16657 |
|
| 16658 |
|
| 16659 |
/*******************/ |
| 16660 |
/* ECORE CALLBACKS */ |
| 16661 |
/*******************/ |
| 16662 |
|
| 16663 |
/*
 * Assert reset on the common (shared, non-per-function) HW blocks by
 * writing the MISC reset-clear registers. On E3 the MSTAT blocks are
 * reset as well.
 */
static void
bxe_reset_common(struct bxe_softc *sc)
{
    uint32_t val = 0x1400;

    /* reset_common */
    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f);

    if (CHIP_IS_E3(sc)) {
        /* E3 additionally resets both MSTAT blocks */
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val);
}
| 16678 |
|
| 16679 |
/*
 * One-time common PHY initialization via elink, using the shmem bases
 * of both paths on non-E1x chips. Skipped entirely when the management
 * firmware supports LFA (Link Flap Avoidance), which owns the PHY then.
 */
static void
bxe_common_init_phy(struct bxe_softc *sc)
{
    uint32_t shmem_base[2];
    uint32_t shmem2_base[2];

    /* Avoid common init in case MFW supports LFA */
    if (SHMEM2_RD(sc, size) >
        (uint32_t)offsetof(struct shmem2_region,
                           lfa_host_addr[SC_PORT(sc)])) {
        return;
    }

    shmem_base[0]  = sc->devinfo.shmem_base;
    shmem2_base[0] = sc->devinfo.shmem2_base;

    if (!CHIP_IS_E1x(sc)) {
        /* the other path's shmem bases are published through our shmem2 */
        shmem_base[1]  = SHMEM2_RD(sc, other_shmem_base_addr);
        shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
    }

    BXE_PHY_LOCK(sc);
    elink_common_init_phy(sc, shmem_base, shmem2_base,
                          sc->devinfo.chip_id, 0);
    BXE_PHY_UNLOCK(sc);
}
| 16705 |
|
| 16706 |
static void |
| 16707 |
bxe_pf_disable(struct bxe_softc *sc) |
| 16708 |
{ |
| 16709 |
uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); |
| 16710 |
|
| 16711 |
val &= ~IGU_PF_CONF_FUNC_EN; |
| 16712 |
|
| 16713 |
REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); |
| 16714 |
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); |
| 16715 |
REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); |
| 16716 |
} |
| 16717 |
|
| 16718 |
static void |
| 16719 |
bxe_init_pxp(struct bxe_softc *sc) |
| 16720 |
{ |
| 16721 |
uint16_t devctl; |
| 16722 |
int r_order, w_order; |
| 16723 |
|
| 16724 |
devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); |
| 16725 |
|
| 16726 |
BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); |
| 16727 |
|
| 16728 |
w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); |
| 16729 |
|
| 16730 |
if (sc->mrrs == -1) { |
| 16731 |
r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); |
| 16732 |
} else { |
| 16733 |
BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); |
| 16734 |
r_order = sc->mrrs; |
| 16735 |
} |
| 16736 |
|
| 16737 |
ecore_init_pxp_arb(sc, r_order, w_order); |
| 16738 |
} |
| 16739 |
|
| 16740 |
static uint32_t |
| 16741 |
bxe_get_pretend_reg(struct bxe_softc *sc) |
| 16742 |
{ |
| 16743 |
uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; |
| 16744 |
uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); |
| 16745 |
return (base + (SC_ABS_FUNC(sc)) * stride); |
| 16746 |
} |
| 16747 |
|
| 16748 |
/* |
| 16749 |
* Called only on E1H or E2. |
| 16750 |
* When pretending to be PF, the pretend value is the function number 0..7. |
| 16751 |
* When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID |
| 16752 |
* combination. |
| 16753 |
*/ |
| 16754 |
static int
bxe_pretend_func(struct bxe_softc *sc,
                 uint16_t         pretend_func_val)
{
    uint32_t pretend_reg;

    /* E1H can only pretend to be one of its own 8 functions */
    if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) {
        return (-1);
    }

    /* get my own pretend register */
    pretend_reg = bxe_get_pretend_reg(sc);
    REG_WR(sc, pretend_reg, pretend_func_val);
    /* read back to flush the write before any subsequent access */
    REG_RD(sc, pretend_reg);
    return (0);
}
| 16770 |
|
| 16771 |
/* SRIOV DMAE setup; currently a no-op stub (SRIOV support is disabled). */
static void
bxe_iov_init_dmae(struct bxe_softc *sc)
{
    return;
#if 0
    BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF");

    if (!IS_SRIOV(sc)) {
        return;
    }

    REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0);
#endif
}
| 16785 |
|
| 16786 |
#if 0
/* SRIOV ILT setup; entire function currently compiled out. */
static int
bxe_iov_init_ilt(struct bxe_softc *sc,
                 uint16_t         line)
{
    return (line);
#if 0
    int i;
    struct ecore_ilt* ilt = sc->ilt;

    if (!IS_SRIOV(sc)) {
        return (line);
    }

    /* set vfs ilt lines */
    for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) {
        struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i);
        ilt->lines[line+i].page = hw_cxt->addr;
        ilt->lines[line+i].page_mapping = hw_cxt->mapping;
        ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
    }
    return (line+i);
#endif
}
#endif
| 16811 |
|
| 16812 |
/* SRIOV doorbell-queue setup; currently a no-op stub (SRIOV disabled). */
static void
bxe_iov_init_dq(struct bxe_softc *sc)
{
    return;
#if 0
    if (!IS_SRIOV(sc)) {
        return;
    }

    /* Set the DQ such that the CID reflect the abs_vfid */
    REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0);
    REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

    /*
     * Set VFs starting CID. If its > 0 the preceding CIDs are belong to
     * the PF L2 queues
     */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

    /* The VF window size is the log2 of the max number of CIDs per VF */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

    /*
     * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
     * the Pf doorbell size although the 2 are independent.
     */
    REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST,
           BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

    /*
     * No security checks for now -
     * configure single rule (out of 16) mask = 0x1, value = 0x0,
     * CID range 0 - 0x1ffff
     */
    REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1);
    REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
    REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

    /* set the number of VF alllowed doorbells to the full DQ range */
    REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

    /* set the VF doorbell threshold */
    REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
#endif
}
| 16858 |
|
| 16859 |
/* send a NIG loopback debug packet */ |
| 16860 |
/* send a NIG loopback debug packet */
static void
bxe_lb_pckt(struct bxe_softc *sc)
{
    uint32_t wb_write[3];

    /* Ethernet source and destination addresses */
    wb_write[0] = 0x55555555;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x20; /* SOP */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

    /* NON-IP protocol */
    wb_write[0] = 0x09000000;
    wb_write[1] = 0x55555555;
    wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
    REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
| 16877 |
|
| 16878 |
/*
 * Some of the internal memories are not directly readable from the driver.
 * To test them we send debug packets through the NIG loopback path and
 * verify the NIG/PRS statistics counters advance as expected.
 *
 * Returns 0 on success, or a distinct negative code identifying which
 * stage of the self-test failed (-1..-4).
 */
static int
bxe_int_mem_test(struct bxe_softc *sc)
{
    int factor;        /* poll-budget multiplier for slow platforms */
    int count, i;
    uint32_t val = 0;

    /* FPGA and emulation platforms are far slower than silicon. */
    if (CHIP_REV_IS_FPGA(sc)) {
        factor = 120;
    } else if (CHIP_REV_IS_EMUL(sc)) {
        factor = 200;
    } else {
        factor = 1;
    }

    /* disable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send Ethernet packet */
    bxe_lb_pckt(sc);

    /* TODO do i reset NIG statistic? */
    /* Wait until NIG register shows 1 packet of size 0x10 */
    count = 1000 * factor;
    while (count) {
        /* statistic is read back via the shared wb_data scratch area */
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0x10) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0x10) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-1);
    }

    /* wait until PRS register shows 1 packet */
    count = (1000 * factor);
    while (count) {
        val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
        if (val == 1) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0x1) {
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
        return (-2);
    }

    /* Reset and init BRB, PRS */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);

    /* Disable inputs of parser neighbor blocks (again, after the reset) */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x0);
    REG_WR(sc, CFC_REG_DEBUG0, 0x1);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0);

    /* Write 0 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

    /* send 10 Ethernet packets */
    for (i = 0; i < 10; i++) {
        bxe_lb_pckt(sc);
    }

    /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */
    count = (1000 * factor);
    while (count) {
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);
        if (val == 0xb0) {
            break;
        }

        DELAY(10000);
        count--;
    }

    if (val != 0xb0) {
        BLOGE(sc, "NIG timeout val=0x%x\n", val);
        return (-3);
    }

    /* Wait until PRS register shows 2 packets */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 2) {
        /* non-fatal: log and continue, matching the upstream sequence */
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* Write 1 to parser credits for CFC search request */
    REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

    /* Wait (fixed delay, not a poll) for the PRS to consume one more packet */
    DELAY(10000 * factor);

    /* PRS packet counter should now read 3 */
    val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS);
    if (val != 3) {
        /* non-fatal: log and continue, matching the upstream sequence */
        BLOGE(sc, "PRS timeout val=0x%x\n", val);
    }

    /* clear NIG EOP FIFO */
    for (i = 0; i < 11; i++) {
        REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO);
    }

    val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY);
    if (val != 1) {
        BLOGE(sc, "clear of NIG failed\n");
        return (-4);
    }

    /* Reset and init BRB, PRS, NIG */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
    DELAY(50000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
    DELAY(50000);
    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    if (!CNIC_SUPPORT(sc)) {
        /* set NIC mode */
        REG_WR(sc, PRS_REG_NIC_MODE, 1);
    }

    /* Enable inputs of parser neighbor blocks */
    REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff);
    REG_WR(sc, TCM_REG_PRS_IFEN, 0x1);
    REG_WR(sc, CFC_REG_DEBUG0, 0x0);
    REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1);

    return (0);
}
| 17034 |
|
| 17035 |
static void |
| 17036 |
bxe_setup_fan_failure_detection(struct bxe_softc *sc) |
| 17037 |
{ |
| 17038 |
int is_required; |
| 17039 |
uint32_t val; |
| 17040 |
int port; |
| 17041 |
|
| 17042 |
is_required = 0; |
| 17043 |
val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & |
| 17044 |
SHARED_HW_CFG_FAN_FAILURE_MASK); |
| 17045 |
|
| 17046 |
if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { |
| 17047 |
is_required = 1; |
| 17048 |
} |
| 17049 |
/* |
| 17050 |
* The fan failure mechanism is usually related to the PHY type since |
| 17051 |
* the power consumption of the board is affected by the PHY. Currently, |
| 17052 |
* fan is required for most designs with SFX7101, BCM8727 and BCM8481. |
| 17053 |
*/ |
| 17054 |
else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { |
| 17055 |
for (port = PORT_0; port < PORT_MAX; port++) { |
| 17056 |
is_required |= elink_fan_failure_det_req(sc, |
| 17057 |
sc->devinfo.shmem_base, |
| 17058 |
sc->devinfo.shmem2_base, |
| 17059 |
port); |
| 17060 |
} |
| 17061 |
} |
| 17062 |
|
| 17063 |
BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); |
| 17064 |
|
| 17065 |
if (is_required == 0) { |
| 17066 |
return; |
| 17067 |
} |
| 17068 |
|
| 17069 |
/* Fan failure is indicated by SPIO 5 */ |
| 17070 |
bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); |
| 17071 |
|
| 17072 |
/* set to active low mode */ |
| 17073 |
val = REG_RD(sc, MISC_REG_SPIO_INT); |
| 17074 |
val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); |
| 17075 |
REG_WR(sc, MISC_REG_SPIO_INT, val); |
| 17076 |
|
| 17077 |
/* enable interrupt to signal the IGU */ |
| 17078 |
val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); |
| 17079 |
val |= MISC_SPIO_SPIO5; |
| 17080 |
REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); |
| 17081 |
} |
| 17082 |
|
| 17083 |
/*
 * Unmask (enable) the attention interrupts of the various HW blocks.
 * Writing 0 to a *_INT_MASK register unmasks all of that block's
 * attentions; non-zero values leave selected bits masked.  The
 * commented-out writes mirror the upstream sequence and are kept
 * deliberately disabled.
 */
static void
bxe_enable_blocks_attention(struct bxe_softc *sc)
{
    uint32_t val;

    REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    /* E2+ keeps bit 6 masked in PXP INT_MASK_1; E1/E1H unmasks all */
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40);
    } else {
        REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0);
    }
    REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);
    /*
     * mask read length error interrupts in brb for parser
     * (parsing unit and 'checksum and crc' unit)
     * these errors are legal (PU reads fixed length and CAC can cause
     * read length error on truncated packets)
     */
    REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00);
    REG_WR(sc, QM_REG_QM_INT_MASK, 0);
    REG_WR(sc, TM_REG_TM_INT_MASK, 0);
    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0);
    REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0);
    REG_WR(sc, XCM_REG_XCM_INT_MASK, 0);
    /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */
    /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */
    REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0);
    REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0);
    REG_WR(sc, UCM_REG_UCM_INT_MASK, 0);
    /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */
    /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */
    REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0);
    REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0);
    REG_WR(sc, CCM_REG_CCM_INT_MASK, 0);
    /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */
    /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */

    /* build PXP2 INT_MASK_0: keep PGL completion/PCIe attentions masked */
    val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN);
    if (!CHIP_IS_E1x(sc)) {
        /* E2+ additionally masks the PGL read/write blocked attentions */
        val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED);
    }
    REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val);

    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0);
    REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0);
    REG_WR(sc, TCM_REG_TCM_INT_MASK, 0);
    /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */

    if (!CHIP_IS_E1x(sc)) {
        /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
        REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
    }

    REG_WR(sc, CDU_REG_CDU_INT_MASK, 0);
    REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0);
    /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */
    REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
}
| 17146 |
|
| 17147 |
/**
 * bxe_init_hw_common - initialize the HW at the COMMON phase.
 *
 * @sc: driver handle
 *
 * Runs once per chip path by the first PF to load.  Resets the chip,
 * then initializes every common HW block in the order the hardware
 * requires.  Returns 0 on success or -1 if any init-done poll fails.
 */
static int
bxe_init_hw_common(struct bxe_softc *sc)
{
    uint8_t abs_func_id;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting common init for func %d\n",
          SC_ABS_FUNC(sc));

    /*
     * take the RESET lock to protect undi_unload flow from accessing
     * registers while we are resetting the chip
     */
    bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    bxe_reset_common(sc);

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff);

    /* bring blocks out of reset; E3 additionally releases the MSTAT blocks */
    val = 0xfffc;
    if (CHIP_IS_E3(sc)) {
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
        val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
    }

    REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val);

    bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);

    ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON);
    BLOGD(sc, DBG_LOAD, "after misc block init\n");

    if (!CHIP_IS_E1x(sc)) {
        /*
         * 4-port mode or 2-port mode we need to turn off master-enable for
         * everyone. After that we turn it back on for self. So, we disregard
         * multi-function, and always disable all functions on the given path,
         * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
         */
        for (abs_func_id = SC_PATH(sc);
             abs_func_id < (E2_FUNC_MAX * 2);
             abs_func_id += 2) {
            if (abs_func_id == SC_ABS_FUNC(sc)) {
                REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
                continue;
            }

            /* pretend to be the other function, disable it, and restore */
            bxe_pretend_func(sc, abs_func_id);

            /* clear pf enable */
            bxe_pf_disable(sc);

            bxe_pretend_func(sc, SC_ABS_FUNC(sc));
        }
    }

    BLOGD(sc, DBG_LOAD, "after pf disable\n");

    ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON);

    if (CHIP_IS_E1(sc)) {
        /*
         * enable HW interrupt from PXP on USDM overflow
         * bit 16 on INT_MASK_0
         */
        REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0);
    }

    ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON);
    bxe_init_pxp(sc);

#ifdef __BIG_ENDIAN
    /* byte-swap the RQ/RD paths so host-order data reaches the chip */
    REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
    REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
    /* make sure this value is 0 */
    REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0);

    //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1);
    REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

    ecore_ilt_init_page_size(sc, INITOP_SET);

    if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) {
        REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
    }

    /* let the HW do it's magic... */
    DELAY(100000);

    /* finish PXP init */
    val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 CFG failed\n");
        return (-1);
    }
    val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
    if (val != 1) {
        BLOGE(sc, "PXP2 RD_INIT failed\n");
        return (-1);
    }

    BLOGD(sc, DBG_LOAD, "after pxp init\n");

    /*
     * Timer bug workaround for E2 only. We need to set the entire ILT to have
     * entries with value "0" and valid bit on. This needs to be done by the
     * first PF that is loaded in a path (i.e. common phase)
     */
    if (!CHIP_IS_E1x(sc)) {
        /*
         * In E2 there is a bug in the timers block that can cause function 6 / 7
         * (i.e. vnic3) to start even if it is marked as "scan-off".
         * This occurs when a different function (func2,3) is being marked
         * as "scan-off". Real-life scenario for example: if a driver is being
         * load-unloaded while func6,7 are down. This will cause the timer to access
         * the ilt, translate to a logical address and send a request to read/write.
         * Since the ilt for the function that is down is not valid, this will cause
         * a translation error which is unrecoverable.
         * The Workaround is intended to make sure that when this happens nothing
         * fatal will occur. The workaround:
         * 1. First PF driver which loads on a path will:
         *    a. After taking the chip out of reset, by using pretend,
         *       it will write "0" to the following registers of
         *       the other vnics.
         *       REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
         *       REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
         *       REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
         *       And for itself it will write '1' to
         *       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
         *       dmae-operations (writing to pram for example.)
         *       note: can be done for only function 6,7 but cleaner this
         *       way.
         *    b. Write zero+valid to the entire ILT.
         *    c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
         *       VNIC3 (of that port). The range allocated will be the
         *       entire ILT. This is needed to prevent ILT range error.
         * 2. Any PF driver load flow:
         *    a. ILT update with the physical addresses of the allocated
         *       logical pages.
         *    b. Wait 20msec. - note that this timeout is needed to make
         *       sure there are no requests in one of the PXP internal
         *       queues with "old" ILT addresses.
         *    c. PF enable in the PGLC.
         *    d. Clear the was_error of the PF in the PGLC. (could have
         *       occurred while driver was down)
         *    e. PF enable in the CFC (WEAK + STRONG)
         *    f. Timers scan enable
         * 3. PF driver unload flow:
         *    a. Clear the Timers scan_en.
         *    b. Polling for scan_on=0 for that PF.
         *    c. Clear the PF enable bit in the PXP.
         *    d. Clear the PF enable in the CFC (WEAK + STRONG)
         *    e. Write zero+valid to all ILT entries (The valid bit must
         *       stay set)
         *    f. If this is VNIC 3 of a port then also init
         *       first_timers_ilt_entry to zero and last_timers_ilt_entry
         *       to the last entry in the ILT.
         *
         * Notes:
         * Currently the PF error in the PGLC is non recoverable.
         * In the future there will be a recovery routine for this error.
         * Currently attention is masked.
         * Having an MCP lock on the load/unload process does not guarantee that
         * there is no Timer disable during Func6/7 enable. This is because the
         * Timers scan is currently being cleared by the MCP on FLR.
         * Step 2.d can be done only for PF6/7 and the driver can also check if
         * there is error before clearing it. But the flow above is simpler and
         * more general.
         * All ILT entries are written by zero+valid and not just PF6/7
         * ILT entries since in the future the ILT entries allocation for
         * PF-s might be dynamic.
         */
        struct ilt_client_info ilt_cli;
        struct ecore_ilt ilt;

        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        memset(&ilt, 0, sizeof(struct ecore_ilt));

        /* initialize dummy TM client */
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        /*
         * Step 1: set zeroes to all ilt page entries with valid bit on
         * Step 2: set the timers first/last ilt entry to point
         * to the entire range to prevent ILT range error for 3rd/4th
         * vnic (this code assumes existence of the vnic)
         *
         * both steps performed by call to ecore_ilt_client_init_op()
         * with dummy TM client
         *
         * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
         * and his brother are split registers
         */

        bxe_pretend_func(sc, (SC_PATH(sc) + 6));
        ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR);
        bxe_pretend_func(sc, SC_ABS_FUNC(sc));

        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN);
        REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
    }

    REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0);
    REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0);

    if (!CHIP_IS_E1x(sc)) {
        /* emulation/FPGA platforms get a much larger ATC poll budget */
        int factor = CHIP_REV_IS_EMUL(sc) ? 1000 :
                     (CHIP_REV_IS_FPGA(sc) ? 400 : 0);

        ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON);
        ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON);

        /* let the HW do it's magic... */
        do {
            DELAY(200000);
            val = REG_RD(sc, ATC_REG_ATC_INIT_DONE);
        } while (factor-- && (val != 1));

        if (val != 1) {
            BLOGE(sc, "ATC_INIT failed\n");
            return (-1);
        }
    }

    BLOGD(sc, DBG_LOAD, "after pglue and atc init\n");

    ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON);

    bxe_iov_init_dmae(sc);

    /* clean the DMAE memory */
    sc->dmae_ready = 1;
    ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1);

    ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON);

    /* dummy DMAE reads of the SEM passive buffers */
    bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3);
    bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3);

    ecore_init_block(sc, BLOCK_QM, PHASE_COMMON);

    /* QM queues pointers table */
    ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);

    /* soft reset pulse */
    REG_WR(sc, QM_REG_SOFT_RESET, 1);
    REG_WR(sc, QM_REG_SOFT_RESET, 0);

    if (CNIC_SUPPORT(sc))
        ecore_init_block(sc, BLOCK_TM, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON);
    REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT);
    if (!CHIP_REV_IS_SLOW(sc)) {
        /* enable hw interrupt from doorbell Q */
        REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0);
    }

    ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON);

    ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON);
    REG_WR(sc, PRS_REG_A_PRSU_20, 0xf);

    if (!CHIP_IS_E1(sc)) {
        REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
    }

    if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /*
             * configure that AFEX and VLAN headers must be
             * received in AFEX mode
             */
            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4);
        } else {
            /*
             * Bit-map indicating which L2 hdrs may appear
             * after the basic Ethernet header
             */
            REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC,
                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
        }
    }

    ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON);

    if (!CHIP_IS_E1x(sc)) {
        /* reset VFC memories */
        REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);
        REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
               VFC_MEMORIES_RST_REG_CAM_RST |
               VFC_MEMORIES_RST_REG_RAM_RST);

        DELAY(20000);
    }

    ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON);

    /* sync semi rtc */
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
           0x80000000);
    REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
           0x80000000);

    ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON);

    if (!CHIP_IS_E1x(sc)) {
        if (IS_MF_AFEX(sc)) {
            /*
             * configure that AFEX and VLAN headers must be
             * sent in AFEX mode
             */
            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE);
            REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA);
            REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
            REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
            REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4);
        } else {
            REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC,
                   sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
        }
    }

    REG_WR(sc, SRC_REG_SOFT_RST, 1);

    ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON);

    if (CNIC_SUPPORT(sc)) {
        /* hash keys for the searcher (T1) */
        REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672);
        REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
        REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b);
        REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a);
        REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116);
        REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
        REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf);
        REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
        REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f);
        REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7);
    }
    REG_WR(sc, SRC_REG_SOFT_RST, 0);

    if (sizeof(union cdu_context) != 1024) {
        /* we currently assume that a context is 1024 bytes */
        BLOGE(sc, "please adjust the size of cdu_context(%ld)\n",
              (long)sizeof(union cdu_context));
    }

    ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON);
    /* CDU global params: block size 4, no offset, context size 1024 */
    val = (4 << 24) + (0 << 12) + 1024;
    REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val);

    ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON);

    REG_WR(sc, CFC_REG_INIT_REG, 0x7FF);
    /* enable context validation interrupt from CFC */
    REG_WR(sc, CFC_REG_CFC_INT_MASK, 0);

    /* set the thresholds to prevent CFC/CDU race */
    REG_WR(sc, CFC_REG_DEBUG0, 0x20020000);
    ecore_init_block(sc, BLOCK_HC, PHASE_COMMON);

    if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) {
        REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36);
    }

    ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON);
    ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON);

    /* Reset PCIE errors for debug */
    REG_WR(sc, 0x2814, 0xffffffff);
    REG_WR(sc, 0x3820, 0xffffffff);

    if (!CHIP_IS_E1x(sc)) {
        /* clear "unsupported request" PCIe error status bits */
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
               (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
                PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
               (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
                PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
        REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
               (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
                PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
    }

    ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON);

    if (!CHIP_IS_E1(sc)) {
        /* in E3 this done in per-port section */
        if (!CHIP_IS_E3(sc))
            REG_WR(sc, NIG_REG_LLH_MF_MODE, IS_MF(sc));
    }

    if (CHIP_IS_E1H(sc)) {
        /* not applicable for E2 (and above ...) */
        REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc));
    }

    if (CHIP_REV_IS_SLOW(sc)) {
        DELAY(200000);
    }

    /* finish CFC init */
    val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC LL_INIT failed\n");
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC AC_INIT failed\n");
        return (-1);
    }
    val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
    if (val != 1) {
        BLOGE(sc, "CFC CAM_INIT failed\n");
        return (-1);
    }
    REG_WR(sc, CFC_REG_DEBUG0, 0);

    if (CHIP_IS_E1(sc)) {
        /* read NIG statistic to see if this is our first up since powerup */
        bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2);
        val = *BXE_SP(sc, wb_data[0]);

        /* do internal memory self test */
        if ((val == 0) && bxe_int_mem_test(sc)) {
            BLOGE(sc, "internal mem self test failed\n");
            return (-1);
        }
    }

    bxe_setup_fan_failure_detection(sc);

    /* clear PXP2 attentions */
    REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);

    bxe_enable_blocks_attention(sc);

    if (!CHIP_REV_IS_SLOW(sc)) {
        ecore_enable_blocks_parity(sc);
    }

    if (!BXE_NOMCP(sc)) {
        if (CHIP_IS_E1x(sc)) {
            bxe_common_init_phy(sc);
        }
    }

    return (0);
}
| 17636 |
|
| 17637 |
/**
 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
 *
 * @sc: driver handle
 *
 * Runs the COMMON phase initialization and then, when an MCP is
 * present, also performs the common PHY init.
 */
static int
bxe_init_hw_common_chip(struct bxe_softc *sc)
{
    int ret;

    ret = bxe_init_hw_common(sc);
    if (ret != 0) {
        return (ret);
    }

    /* In E2 2-PORT mode, same ext phy is used for the two paths */
    if (!BXE_NOMCP(sc)) {
        bxe_common_init_phy(sc);
    }

    return (0);
}
| 17658 |
|
| 17659 |
/*
 * bxe_init_hw_port - init HW at the PORT phase (PHASE_PORT0/PHASE_PORT1).
 *
 * Initializes every ecore block for this port in a fixed, hardware-mandated
 * order and programs per-port thresholds (BRB pause, PBF credits, AEU
 * attention masks, NIG classification).  The statement order below matters;
 * do not reorder register writes.
 *
 * Returns 0 (the routine cannot fail).
 */
static int
bxe_init_hw_port(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
    uint32_t low, high;
    uint32_t val;

    BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port);

    /* Mask this port's NIG interrupts while blocks are initialized. */
    REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

    ecore_init_block(sc, BLOCK_MISC, init_phase);
    ecore_init_block(sc, BLOCK_PXP, init_phase);
    ecore_init_block(sc, BLOCK_PXP2, init_phase);

    /*
     * Timers bug workaround: disables the pf_master bit in pglue at
     * common phase, we need to enable it here before any dmae access are
     * attempted. Therefore we manually added the enable-master to the
     * port phase (it also happens in the function phase)
     */
    if (!CHIP_IS_E1x(sc)) {
        REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
    }

    ecore_init_block(sc, BLOCK_ATC, init_phase);
    ecore_init_block(sc, BLOCK_DMAE, init_phase);
    ecore_init_block(sc, BLOCK_PGLUE_B, init_phase);
    ecore_init_block(sc, BLOCK_QM, init_phase);

    ecore_init_block(sc, BLOCK_TCM, init_phase);
    ecore_init_block(sc, BLOCK_UCM, init_phase);
    ecore_init_block(sc, BLOCK_CCM, init_phase);
    ecore_init_block(sc, BLOCK_XCM, init_phase);

    /* QM cid (connection) count */
    ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);

    if (CNIC_SUPPORT(sc)) {
        /* Timers block is only needed when CNIC (iSCSI/FCoE) is enabled. */
        ecore_init_block(sc, BLOCK_TM, init_phase);
        REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
    }

    ecore_init_block(sc, BLOCK_DORQ, init_phase);

    ecore_init_block(sc, BLOCK_BRB1, init_phase);

    /*
     * BRB pause thresholds (in 256-byte units) for E1/E1H only; the
     * value chosen depends on MF mode, single/dual port, and MTU.
     */
    if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) {
        if (IS_MF(sc)) {
            low = (BXE_ONE_PORT(sc) ? 160 : 246);
        } else if (sc->mtu > 4096) {
            if (BXE_ONE_PORT(sc)) {
                low = 160;
            } else {
                val = sc->mtu;
                /* (24*1024 + val*4)/256 */
                low = (96 + (val / 64) + ((val % 64) ? 1 : 0));
            }
        } else {
            low = (BXE_ONE_PORT(sc) ? 80 : 160);
        }
        high = (low + 56); /* 14*1024/256 */
        REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
        REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
    }

    if (CHIP_IS_MODE_4_PORT(sc)) {
        REG_WR(sc, SC_PORT(sc) ?
               BRB1_REG_MAC_GUARANTIED_1 :
               BRB1_REG_MAC_GUARANTIED_0, 40);
    }

    ecore_init_block(sc, BLOCK_PRS, init_phase);
    if (CHIP_IS_E3B0(sc)) {
        if (IS_MF_AFEX(sc)) {
            /* configure headers for AFEX mode */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
                   PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_MUST_HAVE_HDRS_PORT_1 :
                   PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
        } else {
            /* Ovlan exists only if we are in multi-function +
             * switch-dependent mode, in switch-independent there
             * is no ovlan headers
             */
            REG_WR(sc, SC_PORT(sc) ?
                   PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
                   PRS_REG_HDRS_AFTER_BASIC_PORT_0,
                   (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
        }
    }

    ecore_init_block(sc, BLOCK_TSDM, init_phase);
    ecore_init_block(sc, BLOCK_CSDM, init_phase);
    ecore_init_block(sc, BLOCK_USDM, init_phase);
    ecore_init_block(sc, BLOCK_XSDM, init_phase);

    ecore_init_block(sc, BLOCK_TSEM, init_phase);
    ecore_init_block(sc, BLOCK_USEM, init_phase);
    ecore_init_block(sc, BLOCK_CSEM, init_phase);
    ecore_init_block(sc, BLOCK_XSEM, init_phase);

    ecore_init_block(sc, BLOCK_UPB, init_phase);
    ecore_init_block(sc, BLOCK_XPB, init_phase);

    ecore_init_block(sc, BLOCK_PBF, init_phase);

    if (CHIP_IS_E1x(sc)) {
        /* configure PBF to work without PAUSE mtu 9000 */
        REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

        /* update threshold */
        REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
        /* update init credit */
        REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

        /* probe changes */
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1);
        DELAY(50);
        REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0);
    }

    if (CNIC_SUPPORT(sc)) {
        ecore_init_block(sc, BLOCK_SRC, init_phase);
    }

    ecore_init_block(sc, BLOCK_CDU, init_phase);
    ecore_init_block(sc, BLOCK_CFC, init_phase);

    if (CHIP_IS_E1(sc)) {
        /* Clear HC leading/trailing edge latches before HC init. */
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    }
    ecore_init_block(sc, BLOCK_HC, init_phase);

    ecore_init_block(sc, BLOCK_IGU, init_phase);

    ecore_init_block(sc, BLOCK_MISC_AEU, init_phase);
    /* init aeu_mask_attn_func_0/1:
     * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
     * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
     *   bits 4-7 are used for "per vn group attention" */
    val = IS_MF(sc) ? 0xF7 : 0x7;
    /* Enable DCBX attention for all but E1 */
    val |= CHIP_IS_E1(sc) ? 0 : 0x10;
    REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

    ecore_init_block(sc, BLOCK_NIG, init_phase);

    if (!CHIP_IS_E1x(sc)) {
        /* Bit-map indicating which L2 hdrs may appear after the
         * basic Ethernet header
         */
        if (IS_MF_AFEX(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
        } else {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_P1_HDRS_AFTER_BASIC :
                   NIG_REG_P0_HDRS_AFTER_BASIC,
                   IS_MF_SD(sc) ? 7 : 6);
        }

        if (CHIP_IS_E3(sc)) {
            REG_WR(sc, SC_PORT(sc) ?
                   NIG_REG_LLH1_MF_MODE :
                   NIG_REG_LLH_MF_MODE, IS_MF(sc));
        }
    }
    if (!CHIP_IS_E3(sc)) {
        REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
    }

    if (!CHIP_IS_E1(sc)) {
        /* 0x2 disable mf_ov, 0x1 enable */
        REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
               (IS_MF_SD(sc) ? 0x1 : 0x2));

        if (!CHIP_IS_E1x(sc)) {
            /* NIG LLH classification type: 0 = SF, 1 = SD, 2 = SI/AFEX */
            val = 0;
            switch (sc->devinfo.mf_info.mf_mode) {
            case MULTI_FUNCTION_SD:
                val = 1;
                break;
            case MULTI_FUNCTION_SI:
            case MULTI_FUNCTION_AFEX:
                val = 2;
                break;
            }

            REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE :
                        NIG_REG_LLH0_CLS_TYPE), val);
        }
        REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
        REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
        REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
    }

    /* If SPIO5 is set to generate interrupts, enable it for this port */
    val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN);
    if (val & MISC_SPIO_SPIO5) {
        uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                    MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
        val = REG_RD(sc, reg_addr);
        val |= AEU_INPUTS_ATTN_BITS_SPIO5;
        REG_WR(sc, reg_addr, val);
    }

    return (0);
}
| 17877 |
|
| 17878 |
static uint32_t |
| 17879 |
bxe_flr_clnup_reg_poll(struct bxe_softc *sc, |
| 17880 |
uint32_t reg, |
| 17881 |
uint32_t expected, |
| 17882 |
uint32_t poll_count) |
| 17883 |
{ |
| 17884 |
uint32_t cur_cnt = poll_count; |
| 17885 |
uint32_t val; |
| 17886 |
|
| 17887 |
while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { |
| 17888 |
DELAY(FLR_WAIT_INTERVAL); |
| 17889 |
} |
| 17890 |
|
| 17891 |
return (val); |
| 17892 |
} |
| 17893 |
|
| 17894 |
/*
 * Poll the HW usage-counter register 'reg' until it drops to zero.
 *
 * Returns 0 when the counter reached zero within 'poll_cnt' polls,
 * 1 otherwise (after logging 'msg' and the stuck value).
 */
static int
bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc,
                              uint32_t reg,
                              char *msg,
                              uint32_t poll_cnt)
{
    uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);

    if (val != 0) {
        /* val is uint32_t: use %u, not %d (which misprints values >= 2^31) */
        BLOGE(sc, "%s usage count=%u\n", msg, val);
        return (1);
    }

    return (0);
}
| 17909 |
|
| 17910 |
/* Common routines with VF FLR cleanup */ |
| 17911 |
static uint32_t |
| 17912 |
bxe_flr_clnup_poll_count(struct bxe_softc *sc) |
| 17913 |
{ |
| 17914 |
/* adjust polling timeout */ |
| 17915 |
if (CHIP_REV_IS_EMUL(sc)) { |
| 17916 |
return (FLR_POLL_CNT * 2000); |
| 17917 |
} |
| 17918 |
|
| 17919 |
if (CHIP_REV_IS_FPGA(sc)) { |
| 17920 |
return (FLR_POLL_CNT * 120); |
| 17921 |
} |
| 17922 |
|
| 17923 |
return (FLR_POLL_CNT); |
| 17924 |
} |
| 17925 |
|
| 17926 |
static int |
| 17927 |
bxe_poll_hw_usage_counters(struct bxe_softc *sc, |
| 17928 |
uint32_t poll_cnt) |
| 17929 |
{ |
| 17930 |
/* wait for CFC PF usage-counter to zero (includes all the VFs) */ |
| 17931 |
if (bxe_flr_clnup_poll_hw_counter(sc, |
| 17932 |
CFC_REG_NUM_LCIDS_INSIDE_PF, |
| 17933 |
"CFC PF usage counter timed out", |
| 17934 |
poll_cnt)) { |
| 17935 |
return (1); |
| 17936 |
} |
| 17937 |
|
| 17938 |
/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ |
| 17939 |
if (bxe_flr_clnup_poll_hw_counter(sc, |
| 17940 |
DORQ_REG_PF_USAGE_CNT, |
| 17941 |
"DQ PF usage counter timed out", |
| 17942 |
poll_cnt)) { |
| 17943 |
return (1); |
| 17944 |
} |
| 17945 |
|
| 17946 |
/* Wait for QM PF usage-counter to zero (until DQ cleanup) */ |
| 17947 |
if (bxe_flr_clnup_poll_hw_counter(sc, |
| 17948 |
QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), |
| 17949 |
"QM PF usage counter timed out", |
| 17950 |
poll_cnt)) { |
| 17951 |
return (1); |
| 17952 |
} |
| 17953 |
|
| 17954 |
/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ |
| 17955 |
if (bxe_flr_clnup_poll_hw_counter(sc, |
| 17956 |
TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), |
| 17957 |
"Timers VNIC usage counter timed out", |
| 17958 |
poll_cnt)) { |
| 17959 |
return (1); |
| 17960 |
} |
| 17961 |
|
| 17962 |
if (bxe_flr_clnup_poll_hw_counter(sc, |
| 17963 |
TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), |
| 17964 |
"Timers NUM_SCANS usage counter timed out", |
| 17965 |
poll_cnt)) { |
| 17966 |
return (1); |
| 17967 |
} |
| 17968 |
|
| 17969 |
/* Wait DMAE PF usage counter to zero */ |
| 17970 |
if (bxe_flr_clnup_poll_hw_counter(sc, |
| 17971 |
dmae_reg_go_c[INIT_DMAE_C(sc)], |
| 17972 |
"DMAE dommand register timed out", |
| 17973 |
poll_cnt)) { |
| 17974 |
return (1); |
| 17975 |
} |
| 17976 |
|
| 17977 |
return (0); |
| 17978 |
} |
| 17979 |
|
| 17980 |
/*
 * Helpers for assembling an SDM "operation generator" command word;
 * the result is written to XSDM_REG_OPERATION_GEN (see
 * bxe_send_final_clnup()).  Each macro shifts its argument into the
 * field's position and masks off any overflow bits.
 */
#define OP_GEN_PARAM(param) \
    (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
#define OP_GEN_TYPE(type) \
    (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
#define OP_GEN_AGG_VECT(index) \
    (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
| 17986 |
|
| 17987 |
static int |
| 17988 |
bxe_send_final_clnup(struct bxe_softc *sc, |
| 17989 |
uint8_t clnup_func, |
| 17990 |
uint32_t poll_cnt) |
| 17991 |
{ |
| 17992 |
uint32_t op_gen_command = 0; |
| 17993 |
uint32_t comp_addr = (BAR_CSTRORM_INTMEM + |
| 17994 |
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); |
| 17995 |
int ret = 0; |
| 17996 |
|
| 17997 |
if (REG_RD(sc, comp_addr)) { |
| 17998 |
BLOGE(sc, "Cleanup complete was not 0 before sending\n"); |
| 17999 |
return (1); |
| 18000 |
} |
| 18001 |
|
| 18002 |
op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); |
| 18003 |
op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); |
| 18004 |
op_gen_command |= OP_GEN_AGG_VECT(clnup_func); |
| 18005 |
op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; |
| 18006 |
|
| 18007 |
BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); |
| 18008 |
REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); |
| 18009 |
|
| 18010 |
if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { |
| 18011 |
BLOGE(sc, "FW final cleanup did not succeed\n"); |
| 18012 |
BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", |
| 18013 |
(REG_RD(sc, comp_addr))); |
| 18014 |
bxe_panic(sc, ("FLR cleanup failed\n")); |
| 18015 |
return (1); |
| 18016 |
} |
| 18017 |
|
| 18018 |
/* Zero completion for nxt FLR */ |
| 18019 |
REG_WR(sc, comp_addr, 0); |
| 18020 |
|
| 18021 |
return (ret); |
| 18022 |
} |
| 18023 |
|
| 18024 |
/*
 * Wait for one PBF transmit buffer (described by 'regs') to drain after
 * an FLR: poll until the credit count returns to its initial value, or
 * until as many credits have been freed as were outstanding when polling
 * started, or until 'poll_count' polls (FLR_WAIT_INTERVAL usec apart)
 * elapse.  A timeout is only logged; this routine cannot fail.
 */
static void
bxe_pbf_pN_buf_flushed(struct bxe_softc *sc,
                       struct pbf_pN_buf_regs *regs,
                       uint32_t poll_count)
{
    uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start;
    uint32_t cur_cnt = poll_count;

    /* Snapshot the starting credit and credit-freed counters. */
    crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
    crd = crd_start = REG_RD(sc, regs->crd);
    init_crd = REG_RD(sc, regs->init_crd);

    BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
    BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
    BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

    /*
     * The freed-counter delta is computed modulo 2^32 (via the signed
     * casts) so a counter wrap during polling does not break the
     * comparison.
     */
    while ((crd != init_crd) &&
           ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
            (init_crd - crd_start))) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            crd = REG_RD(sc, regs->crd);
            crd_freed = REG_RD(sc, regs->crd_freed);
        } else {
            /* Polling budget exhausted: log the stuck state and give up. */
            BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
            BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n",
          poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
| 18058 |
|
| 18059 |
/*
 * Wait for one PBF command queue (described by 'regs') to drain after an
 * FLR: poll until the occupancy drops to zero, or until as many lines have
 * been freed as were occupied when polling started, or until 'poll_count'
 * polls (FLR_WAIT_INTERVAL usec apart) elapse.  A timeout is only logged;
 * this routine cannot fail.
 */
static void
bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc,
                       struct pbf_pN_cmd_regs *regs,
                       uint32_t poll_count)
{
    uint32_t occup, to_free, freed, freed_start;
    uint32_t cur_cnt = poll_count;

    /* Snapshot the starting occupancy and lines-freed counters. */
    occup = to_free = REG_RD(sc, regs->lines_occup);
    freed = freed_start = REG_RD(sc, regs->lines_freed);

    BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
    BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

    /*
     * The freed-counter delta is computed modulo 2^32 (via the signed
     * casts) so a counter wrap during polling does not break the
     * comparison.
     */
    while (occup &&
           ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
        if (cur_cnt--) {
            DELAY(FLR_WAIT_INTERVAL);
            occup = REG_RD(sc, regs->lines_occup);
            freed = REG_RD(sc, regs->lines_freed);
        } else {
            /* Polling budget exhausted: log the stuck state and give up. */
            BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
            BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
            BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
            break;
        }
    }

    BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n",
          poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
| 18090 |
|
| 18091 |
/*
 * Verify the PBF TX path is flushed after an FLR by waiting for the
 * command queues and transmission buffers of ports P0, P1 and P4 (the
 * loopback queue) to drain.  Each table entry selects the E3B0 register
 * name or the pre-E3B0 per-port register name for the same counter.
 * Timeouts are logged by the helpers; this routine cannot fail.
 */
static void
bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count)
{
    struct pbf_pN_cmd_regs cmd_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q0 :
            PBF_REG_P0_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q0 :
            PBF_REG_P0_TQ_LINES_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_Q1 :
            PBF_REG_P1_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_Q1 :
            PBF_REG_P1_TQ_LINES_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_OCCUPANCY_LB_Q :
            PBF_REG_P4_TQ_OCCUPANCY,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
            PBF_REG_P4_TQ_LINES_FREED_CNT}
    };

    struct pbf_pN_buf_regs buf_regs[] = {
        {0, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q0 :
            PBF_REG_P0_INIT_CRD ,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q0 :
            PBF_REG_P0_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
            PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
        {1, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_Q1 :
            PBF_REG_P1_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_Q1 :
            PBF_REG_P1_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
            PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
        {4, (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INIT_CRD_LB_Q :
            PBF_REG_P4_INIT_CRD,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_CREDIT_LB_Q :
            PBF_REG_P4_CREDIT,
            (CHIP_IS_E3B0(sc)) ?
            PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
            PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
    };

    int i;

    /* Verify the command queues are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) {
        bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count);
    }

    /* Verify the transmission buffers are flushed P0, P1, P4 */
    for (i = 0; i < ARRAY_SIZE(buf_regs); i++) {
        bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count);
    }
}
| 18157 |
|
| 18158 |
/*
 * Debug aid for FLR cleanup: dump (at DBG_LOAD level) the registers that
 * reflect whether this PF is enabled/disabled in the CFC, PBF, IGU and
 * PGLUE_B blocks.  Read-only; no side effects beyond the register reads
 * and log output.
 */
static void
bxe_hw_enable_status(struct bxe_softc *sc)
{
    uint32_t val;

    val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
    BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, PBF_REG_DISABLE_PF);
    BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

    val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
    BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

    val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
    BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val);
}
| 18187 |
|
| 18188 |
/*
 * Perform the PF-level Function Level Reset (FLR) cleanup sequence:
 * re-enable target reads, wait for all HW usage counters to drain,
 * send the FW final-cleanup command, verify the TX path is flushed and
 * no PCIe transactions are pending, then re-enable master access.
 *
 * Returns 0 on success, -1 if a usage counter or the FW cleanup timed out.
 */
static int
bxe_pf_flr_clnup(struct bxe_softc *sc)
{
    uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc);

    BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc));

    /* Re-enable PF target read access */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

    /* Poll HW usage counters */
    BLOGD(sc, DBG_LOAD, "Polling usage counters\n");
    if (bxe_poll_hw_usage_counters(sc, poll_cnt)) {
        return (-1);
    }

    /* Zero the igu 'trailing edge' and 'leading edge' */

    /* Send the FW cleanup command */
    if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) {
        return (-1);
    }

    /* ATC cleanup */

    /* Verify TX hw is flushed */
    bxe_tx_hw_flushed(sc, poll_cnt);

    /* Wait 100ms (not adjusted according to platform) */
    DELAY(100000);

    /* Verify no pending pci transactions */
    /* NOTE(review): pending transactions are logged but deliberately not
     * treated as fatal; cleanup continues regardless. */
    if (bxe_is_pcie_pending(sc)) {
        BLOGE(sc, "PCIE Transactions still pending\n");
    }

    /* Debug */
    bxe_hw_enable_status(sc);

    /*
     * Master enable - Due to WB DMAE writes performed before this
     * register is re-initialized as part of the regular function init
     */
    REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

    return (0);
}
| 18235 |
|
| 18236 |
#if 0
/*
 * Initialize the searcher (SRC block) T2 table and T1 hash size for this
 * port.  Currently compiled out: the only caller (in bxe_init_hw_func())
 * is also under "#if 0", and NIC mode is set unconditionally instead.
 */
static void
bxe_init_searcher(struct bxe_softc *sc)
{
    int port = SC_PORT(sc);
    ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM);
    /* T1 hash bits value determines the T1 number of entries */
    REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}
#endif
| 18246 |
|
| 18247 |
static int |
| 18248 |
bxe_init_hw_func(struct bxe_softc *sc) |
| 18249 |
{ |
| 18250 |
int port = SC_PORT(sc); |
| 18251 |
int func = SC_FUNC(sc); |
| 18252 |
int init_phase = PHASE_PF0 + func; |
| 18253 |
struct ecore_ilt *ilt = sc->ilt; |
| 18254 |
uint16_t cdu_ilt_start; |
| 18255 |
uint32_t addr, val; |
| 18256 |
uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; |
| 18257 |
int i, main_mem_width, rc; |
| 18258 |
|
| 18259 |
BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); |
| 18260 |
|
| 18261 |
/* FLR cleanup */ |
| 18262 |
if (!CHIP_IS_E1x(sc)) { |
| 18263 |
rc = bxe_pf_flr_clnup(sc); |
| 18264 |
if (rc) { |
| 18265 |
BLOGE(sc, "FLR cleanup failed!\n"); |
| 18266 |
// XXX bxe_fw_dump(sc); |
| 18267 |
// XXX bxe_idle_chk(sc); |
| 18268 |
return (rc); |
| 18269 |
} |
| 18270 |
} |
| 18271 |
|
| 18272 |
/* set MSI reconfigure capability */ |
| 18273 |
if (sc->devinfo.int_block == INT_BLOCK_HC) { |
| 18274 |
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); |
| 18275 |
val = REG_RD(sc, addr); |
| 18276 |
val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; |
| 18277 |
REG_WR(sc, addr, val); |
| 18278 |
} |
| 18279 |
|
| 18280 |
ecore_init_block(sc, BLOCK_PXP, init_phase); |
| 18281 |
ecore_init_block(sc, BLOCK_PXP2, init_phase); |
| 18282 |
|
| 18283 |
ilt = sc->ilt; |
| 18284 |
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; |
| 18285 |
|
| 18286 |
#if 0 |
| 18287 |
if (IS_SRIOV(sc)) { |
| 18288 |
cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS; |
| 18289 |
} |
| 18290 |
cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start); |
| 18291 |
|
| 18292 |
#if (BXE_FIRST_VF_CID > 0) |
| 18293 |
/* |
| 18294 |
* If BXE_FIRST_VF_CID > 0 then the PF L2 cids precedes |
| 18295 |
* those of the VFs, so start line should be reset |
| 18296 |
*/ |
| 18297 |
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; |
| 18298 |
#endif |
| 18299 |
#endif |
| 18300 |
|
| 18301 |
for (i = 0; i < L2_ILT_LINES(sc); i++) { |
| 18302 |
ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; |
| 18303 |
ilt->lines[cdu_ilt_start + i].page_mapping = |
| 18304 |
sc->context[i].vcxt_dma.paddr; |
| 18305 |
ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; |
| 18306 |
} |
| 18307 |
ecore_ilt_init_op(sc, INITOP_SET); |
| 18308 |
|
| 18309 |
#if 0 |
| 18310 |
if (!CONFIGURE_NIC_MODE(sc)) { |
| 18311 |
bxe_init_searcher(sc); |
| 18312 |
REG_WR(sc, PRS_REG_NIC_MODE, 0); |
| 18313 |
BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n"); |
| 18314 |
} else |
| 18315 |
#endif |
| 18316 |
{ |
| 18317 |
/* Set NIC mode */ |
| 18318 |
REG_WR(sc, PRS_REG_NIC_MODE, 1); |
| 18319 |
BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); |
| 18320 |
} |
| 18321 |
|
| 18322 |
if (!CHIP_IS_E1x(sc)) { |
| 18323 |
uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; |
| 18324 |
|
| 18325 |
/* Turn on a single ISR mode in IGU if driver is going to use |
| 18326 |
* INT#x or MSI |
| 18327 |
*/ |
| 18328 |
if (sc->interrupt_mode != INTR_MODE_MSIX) { |
| 18329 |
pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; |
| 18330 |
} |
| 18331 |
|
| 18332 |
/* |
| 18333 |
* Timers workaround bug: function init part. |
| 18334 |
* Need to wait 20msec after initializing ILT, |
| 18335 |
* needed to make sure there are no requests in |
| 18336 |
* one of the PXP internal queues with "old" ILT addresses |
| 18337 |
*/ |
| 18338 |
DELAY(20000); |
| 18339 |
|
| 18340 |
/* |
| 18341 |
* Master enable - Due to WB DMAE writes performed before this |
| 18342 |
* register is re-initialized as part of the regular function |
| 18343 |
* init |
| 18344 |
*/ |
| 18345 |
REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); |
| 18346 |
/* Enable the function in IGU */ |
| 18347 |
REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); |
| 18348 |
} |
| 18349 |
|
| 18350 |
sc->dmae_ready = 1; |
| 18351 |
|
| 18352 |
ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); |
| 18353 |
|
| 18354 |
if (!CHIP_IS_E1x(sc)) |
| 18355 |
REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); |
| 18356 |
|
| 18357 |
ecore_init_block(sc, BLOCK_ATC, init_phase); |
| 18358 |
ecore_init_block(sc, BLOCK_DMAE, init_phase); |
| 18359 |
ecore_init_block(sc, BLOCK_NIG, init_phase); |
| 18360 |
ecore_init_block(sc, BLOCK_SRC, init_phase); |
| 18361 |
ecore_init_block(sc, BLOCK_MISC, init_phase); |
| 18362 |
ecore_init_block(sc, BLOCK_TCM, init_phase); |
| 18363 |
ecore_init_block(sc, BLOCK_UCM, init_phase); |
| 18364 |
ecore_init_block(sc, BLOCK_CCM, init_phase); |
| 18365 |
ecore_init_block(sc, BLOCK_XCM, init_phase); |
| 18366 |
ecore_init_block(sc, BLOCK_TSEM, init_phase); |
| 18367 |
ecore_init_block(sc, BLOCK_USEM, init_phase); |
| 18368 |
ecore_init_block(sc, BLOCK_CSEM, init_phase); |
| 18369 |
ecore_init_block(sc, BLOCK_XSEM, init_phase); |
| 18370 |
|
| 18371 |
if (!CHIP_IS_E1x(sc)) |
| 18372 |
REG_WR(sc, QM_REG_PF_EN, 1); |
| 18373 |
|
| 18374 |
if (!CHIP_IS_E1x(sc)) { |
| 18375 |
REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); |
| 18376 |
REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); |
| 18377 |
REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); |
| 18378 |
REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); |
| 18379 |
} |
| 18380 |
ecore_init_block(sc, BLOCK_QM, init_phase); |
| 18381 |
|
| 18382 |
ecore_init_block(sc, BLOCK_TM, init_phase); |
| 18383 |
ecore_init_block(sc, BLOCK_DORQ, init_phase); |
| 18384 |
|
| 18385 |
bxe_iov_init_dq(sc); |
| 18386 |
|
| 18387 |
ecore_init_block(sc, BLOCK_BRB1, init_phase); |
| 18388 |
ecore_init_block(sc, BLOCK_PRS, init_phase); |
| 18389 |
ecore_init_block(sc, BLOCK_TSDM, init_phase); |
| 18390 |
ecore_init_block(sc, BLOCK_CSDM, init_phase); |
| 18391 |
ecore_init_block(sc, BLOCK_USDM, init_phase); |
| 18392 |
ecore_init_block(sc, BLOCK_XSDM, init_phase); |
| 18393 |
ecore_init_block(sc, BLOCK_UPB, init_phase); |
| 18394 |
ecore_init_block(sc, BLOCK_XPB, init_phase); |
| 18395 |
ecore_init_block(sc, BLOCK_PBF, init_phase); |
| 18396 |
if (!CHIP_IS_E1x(sc)) |
| 18397 |
REG_WR(sc, PBF_REG_DISABLE_PF, 0); |
| 18398 |
|
| 18399 |
ecore_init_block(sc, BLOCK_CDU, init_phase); |
| 18400 |
|
| 18401 |
ecore_init_block(sc, BLOCK_CFC, init_phase); |
| 18402 |
|
| 18403 |
if (!CHIP_IS_E1x(sc)) |
| 18404 |
REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); |
| 18405 |
|
| 18406 |
if (IS_MF(sc)) { |
| 18407 |
REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); |
| 18408 |
REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); |
| 18409 |
} |
| 18410 |
|
| 18411 |
ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); |
| 18412 |
|
| 18413 |
/* HC init per function */ |
| 18414 |
if (sc->devinfo.int_block == INT_BLOCK_HC) { |
| 18415 |
if (CHIP_IS_E1H(sc)) { |
| 18416 |
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); |
| 18417 |
|
| 18418 |
REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); |
| 18419 |
REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); |
| 18420 |
} |
| 18421 |
ecore_init_block(sc, BLOCK_HC, init_phase); |
| 18422 |
|
| 18423 |
} else { |
| 18424 |
int num_segs, sb_idx, prod_offset; |
| 18425 |
|
| 18426 |
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); |
| 18427 |
|
| 18428 |
if (!CHIP_IS_E1x(sc)) { |
| 18429 |
REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); |
| 18430 |
REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); |
| 18431 |
} |
| 18432 |
|
| 18433 |
ecore_init_block(sc, BLOCK_IGU, init_phase); |
| 18434 |
|
| 18435 |
if (!CHIP_IS_E1x(sc)) { |
| 18436 |
int dsb_idx = 0; |
| 18437 |
/** |
| 18438 |
* Producer memory: |
| 18439 |
* E2 mode: address 0-135 match to the mapping memory; |
| 18440 |
* 136 - PF0 default prod; 137 - PF1 default prod; |
| 18441 |
* 138 - PF2 default prod; 139 - PF3 default prod; |
| 18442 |
* 140 - PF0 attn prod; 141 - PF1 attn prod; |
| 18443 |
* 142 - PF2 attn prod; 143 - PF3 attn prod; |
| 18444 |
* 144-147 reserved. |
| 18445 |
* |
| 18446 |
* E1.5 mode - In backward compatible mode; |
| 18447 |
* for non default SB; each even line in the memory |
| 18448 |
* holds the U producer and each odd line hold |
| 18449 |
* the C producer. The first 128 producers are for |
| 18450 |
* NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 |
| 18451 |
* producers are for the DSB for each PF. |
| 18452 |
* Each PF has five segments: (the order inside each |
| 18453 |
* segment is PF0; PF1; PF2; PF3) - 128-131 U prods; |
| 18454 |
* 132-135 C prods; 136-139 X prods; 140-143 T prods; |
| 18455 |
* 144-147 attn prods; |
| 18456 |
*/ |
| 18457 |
/* non-default-status-blocks */ |
| 18458 |
num_segs = CHIP_INT_MODE_IS_BC(sc) ? |
| 18459 |
IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; |
| 18460 |
for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { |
| 18461 |
prod_offset = (sc->igu_base_sb + sb_idx) * |
| 18462 |
num_segs; |
| 18463 |
|
| 18464 |
for (i = 0; i < num_segs; i++) { |
| 18465 |
addr = IGU_REG_PROD_CONS_MEMORY + |
| 18466 |
(prod_offset + i) * 4; |
| 18467 |
REG_WR(sc, addr, 0); |
| 18468 |
} |
| 18469 |
/* send consumer update with value 0 */ |
| 18470 |
bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, |
| 18471 |
USTORM_ID, 0, IGU_INT_NOP, 1); |
| 18472 |
bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); |
| 18473 |
} |
| 18474 |
|
| 18475 |
/* default-status-blocks */ |
| 18476 |
num_segs = CHIP_INT_MODE_IS_BC(sc) ? |
| 18477 |
IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; |
| 18478 |
|
| 18479 |
if (CHIP_IS_MODE_4_PORT(sc)) |
| 18480 |
dsb_idx = SC_FUNC(sc); |
| 18481 |
else |
| 18482 |
dsb_idx = SC_VN(sc); |
| 18483 |
|
| 18484 |
prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? |
| 18485 |
IGU_BC_BASE_DSB_PROD + dsb_idx : |
| 18486 |
IGU_NORM_BASE_DSB_PROD + dsb_idx); |
| 18487 |
|
| 18488 |
/* |
| 18489 |
* igu prods come in chunks of E1HVN_MAX (4) - |
| 18490 |
* does not matters what is the current chip mode |
| 18491 |
*/ |
| 18492 |
for (i = 0; i < (num_segs * E1HVN_MAX); |
| 18493 |
i += E1HVN_MAX) { |
| 18494 |
addr = IGU_REG_PROD_CONS_MEMORY + |
| 18495 |
(prod_offset + i)*4; |
| 18496 |
REG_WR(sc, addr, 0); |
| 18497 |
} |
| 18498 |
/* send consumer update with 0 */ |
| 18499 |
if (CHIP_INT_MODE_IS_BC(sc)) { |
| 18500 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18501 |
USTORM_ID, 0, IGU_INT_NOP, 1); |
| 18502 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18503 |
CSTORM_ID, 0, IGU_INT_NOP, 1); |
| 18504 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18505 |
XSTORM_ID, 0, IGU_INT_NOP, 1); |
| 18506 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18507 |
TSTORM_ID, 0, IGU_INT_NOP, 1); |
| 18508 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18509 |
ATTENTION_ID, 0, IGU_INT_NOP, 1); |
| 18510 |
} else { |
| 18511 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18512 |
USTORM_ID, 0, IGU_INT_NOP, 1); |
| 18513 |
bxe_ack_sb(sc, sc->igu_dsb_id, |
| 18514 |
ATTENTION_ID, 0, IGU_INT_NOP, 1); |
| 18515 |
} |
| 18516 |
bxe_igu_clear_sb(sc, sc->igu_dsb_id); |
| 18517 |
|
| 18518 |
/* !!! these should become driver const once |
| 18519 |
rf-tool supports split-68 const */ |
| 18520 |
REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); |
| 18521 |
REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); |
| 18522 |
REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); |
| 18523 |
REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); |
| 18524 |
REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); |
| 18525 |
REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); |
| 18526 |
} |
| 18527 |
} |
| 18528 |
|
| 18529 |
/* Reset PCIE errors for debug */ |
| 18530 |
REG_WR(sc, 0x2114, 0xffffffff); |
| 18531 |
REG_WR(sc, 0x2120, 0xffffffff); |
| 18532 |
|
| 18533 |
if (CHIP_IS_E1x(sc)) { |
| 18534 |
main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ |
| 18535 |
main_mem_base = HC_REG_MAIN_MEMORY + |
| 18536 |
SC_PORT(sc) * (main_mem_size * 4); |
| 18537 |
main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; |
| 18538 |
main_mem_width = 8; |
| 18539 |
|
| 18540 |
val = REG_RD(sc, main_mem_prty_clr); |
| 18541 |
if (val) { |
| 18542 |
BLOGD(sc, DBG_LOAD, |
| 18543 |
"Parity errors in HC block during function init (0x%x)!\n", |
| 18544 |
val); |
| 18545 |
} |
| 18546 |
|
| 18547 |
/* Clear "false" parity errors in MSI-X table */ |
| 18548 |
for (i = main_mem_base; |
| 18549 |
i < main_mem_base + main_mem_size * 4; |
| 18550 |
i += main_mem_width) { |
| 18551 |
bxe_read_dmae(sc, i, main_mem_width / 4); |
| 18552 |
bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), |
| 18553 |
i, main_mem_width / 4); |
| 18554 |
} |
| 18555 |
/* Clear HC parity attention */ |
| 18556 |
REG_RD(sc, main_mem_prty_clr); |
| 18557 |
} |
| 18558 |
|
| 18559 |
#if 1 |
| 18560 |
/* Enable STORMs SP logging */ |
| 18561 |
REG_WR8(sc, BAR_USTRORM_INTMEM + |
| 18562 |
USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); |
| 18563 |
REG_WR8(sc, BAR_TSTRORM_INTMEM + |
| 18564 |
TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); |
| 18565 |
REG_WR8(sc, BAR_CSTRORM_INTMEM + |
| 18566 |
CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); |
| 18567 |
REG_WR8(sc, BAR_XSTRORM_INTMEM + |
| 18568 |
XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); |
| 18569 |
#endif |
| 18570 |
|
| 18571 |
elink_phy_probe(&sc->link_params); |
| 18572 |
|
| 18573 |
return (0); |
| 18574 |
} |
| 18575 |
|
| 18576 |
static void |
| 18577 |
bxe_link_reset(struct bxe_softc *sc) |
| 18578 |
{ |
| 18579 |
if (!BXE_NOMCP(sc)) { |
| 18580 |
BXE_PHY_LOCK(sc); |
| 18581 |
elink_lfa_reset(&sc->link_params, &sc->link_vars); |
| 18582 |
BXE_PHY_UNLOCK(sc); |
| 18583 |
} else { |
| 18584 |
if (!CHIP_REV_IS_SLOW(sc)) { |
| 18585 |
BLOGW(sc, "Bootcode is missing - cannot reset link\n"); |
| 18586 |
} |
| 18587 |
} |
| 18588 |
} |
| 18589 |
|
| 18590 |
static void |
| 18591 |
bxe_reset_port(struct bxe_softc *sc) |
| 18592 |
{ |
| 18593 |
int port = SC_PORT(sc); |
| 18594 |
uint32_t val; |
| 18595 |
|
| 18596 |
/* reset physical Link */ |
| 18597 |
bxe_link_reset(sc); |
| 18598 |
|
| 18599 |
REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); |
| 18600 |
|
| 18601 |
/* Do not rcv packets to BRB */ |
| 18602 |
REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); |
| 18603 |
/* Do not direct rcv packets that are not for MCP to the BRB */ |
| 18604 |
REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : |
| 18605 |
NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); |
| 18606 |
|
| 18607 |
/* Configure AEU */ |
| 18608 |
REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); |
| 18609 |
|
| 18610 |
DELAY(100000); |
| 18611 |
|
| 18612 |
/* Check for BRB port occupancy */ |
| 18613 |
val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); |
| 18614 |
if (val) { |
| 18615 |
BLOGD(sc, DBG_LOAD, |
| 18616 |
"BRB1 is not empty, %d blocks are occupied\n", val); |
| 18617 |
} |
| 18618 |
|
| 18619 |
/* TODO: Close Doorbell port? */ |
| 18620 |
} |
| 18621 |
|
| 18622 |
static void |
| 18623 |
bxe_ilt_wr(struct bxe_softc *sc, |
| 18624 |
uint32_t index, |
| 18625 |
bus_addr_t addr) |
| 18626 |
{ |
| 18627 |
int reg; |
| 18628 |
uint32_t wb_write[2]; |
| 18629 |
|
| 18630 |
if (CHIP_IS_E1(sc)) { |
| 18631 |
reg = PXP2_REG_RQ_ONCHIP_AT + index*8; |
| 18632 |
} else { |
| 18633 |
reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; |
| 18634 |
} |
| 18635 |
|
| 18636 |
wb_write[0] = ONCHIP_ADDR1(addr); |
| 18637 |
wb_write[1] = ONCHIP_ADDR2(addr); |
| 18638 |
REG_WR_DMAE(sc, reg, wb_write, 2); |
| 18639 |
} |
| 18640 |
|
| 18641 |
static void |
| 18642 |
bxe_clear_func_ilt(struct bxe_softc *sc, |
| 18643 |
uint32_t func) |
| 18644 |
{ |
| 18645 |
uint32_t i, base = FUNC_ILT_BASE(func); |
| 18646 |
for (i = base; i < base + ILT_PER_FUNC; i++) { |
| 18647 |
bxe_ilt_wr(sc, i, 0); |
| 18648 |
} |
| 18649 |
} |
| 18650 |
|
| 18651 |
/*
 * Tear down the per-function hardware state.
 *
 * Disables the function in each STORM's firmware, marks all fastpath
 * and slowpath status blocks disabled, resets the IGU/HC edge latches,
 * stops the CNIC timer scan (if loaded), clears this function's ILT
 * window, applies the E2 vnic-3 timers workaround, and finally disables
 * the PF. Assumes bxe_reset_port() has already run (see comment near
 * the end). Order of the register writes is significant.
 */
static void
bxe_reset_func(struct bxe_softc *sc)
{
    struct bxe_fastpath *fp;
    int port = SC_PORT(sc);
    int func = SC_FUNC(sc);
    int i;

    /* Disable the function in the FW */
    REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
    REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

    /* FP SBs: mark every ethernet queue's status block disabled. */
    FOR_EACH_ETH_QUEUE(sc, i) {
        fp = &sc->fp[i];
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                    CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
                SB_DISABLED);
    }

#if 0
    if (CNIC_LOADED(sc)) {
        /* CNIC SB */
        REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
                (bxe_cnic_fw_sb_id(sc)), SB_DISABLED);
    }
#endif

    /* SP SB: disable the slowpath status block as well. */
    REG_WR8(sc, BAR_CSTRORM_INTMEM +
                CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
            SB_DISABLED);

    /*
     * NOTE(review): the write address does not advance with 'i', so the
     * same dword is written XSTORM_SPQ_DATA_SIZE/4 times rather than the
     * whole SPQ data area being cleared. Presumably the offset should
     * step by i*4 — but this matches the long-standing upstream code, so
     * verify against the vendor reference before changing.
     */
    for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) {
        REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0);
    }

    /* Configure IGU: clear the edge latches for HC or IGU mode. */
    if (sc->devinfo.int_block == INT_BLOCK_HC) {
        REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0);
        REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0);
    } else {
        REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0);
        REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0);
    }

    if (CNIC_LOADED(sc)) {
        /* Disable Timer scan */
        REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
        /*
         * Wait for at least 10ms and up to 2 second for the timers
         * scan to complete
         */
        for (i = 0; i < 200; i++) {
            DELAY(10000);
            if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4))
                break;
        }
    }

    /* Clear ILT */
    bxe_clear_func_ilt(sc, func);

    /*
     * Timers workaround bug for E2: if this is vnic-3,
     * we need to set the entire ilt range for this timers.
     */
    if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) {
        struct ilt_client_info ilt_cli;
        /* use dummy TM client */
        memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
        ilt_cli.start = 0;
        ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
        ilt_cli.client_num = ILT_CLIENT_TM;

        ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR);
    }

    /* this assumes that reset_port() called before reset_func()*/
    if (!CHIP_IS_E1x(sc)) {
        bxe_pf_disable(sc);
    }

    /* DMAE is no longer usable until the next init. */
    sc->dmae_ready = 0;
}
| 18739 |
|
| 18740 |
/*
 * Prepare decompression state for firmware blobs. Nothing to allocate
 * on this platform; always succeeds.
 */
static int
bxe_gunzip_init(struct bxe_softc *sc)
{
    return (0);
}
| 18745 |
|
| 18746 |
/*
 * Counterpart of bxe_gunzip_init(); no decompression state to release.
 */
static void
bxe_gunzip_end(struct bxe_softc *sc)
{
}
| 18751 |
|
| 18752 |
static int |
| 18753 |
bxe_init_firmware(struct bxe_softc *sc) |
| 18754 |
{ |
| 18755 |
if (CHIP_IS_E1(sc)) { |
| 18756 |
ecore_init_e1_firmware(sc); |
| 18757 |
sc->iro_array = e1_iro_arr; |
| 18758 |
} else if (CHIP_IS_E1H(sc)) { |
| 18759 |
ecore_init_e1h_firmware(sc); |
| 18760 |
sc->iro_array = e1h_iro_arr; |
| 18761 |
} else if (!CHIP_IS_E1x(sc)) { |
| 18762 |
ecore_init_e2_firmware(sc); |
| 18763 |
sc->iro_array = e2_iro_arr; |
| 18764 |
} else { |
| 18765 |
BLOGE(sc, "Unsupported chip revision\n"); |
| 18766 |
return (-1); |
| 18767 |
} |
| 18768 |
|
| 18769 |
return (0); |
| 18770 |
} |
| 18771 |
|
| 18772 |
/*
 * Release firmware resources acquired by bxe_init_firmware(). The IRO
 * arrays are statically linked in, so there is nothing to free.
 */
static void
bxe_release_firmware(struct bxe_softc *sc)
{
}
| 18778 |
|
| 18779 |
static int |
| 18780 |
ecore_gunzip(struct bxe_softc *sc, |
| 18781 |
const uint8_t *zbuf, |
| 18782 |
int len) |
| 18783 |
{ |
| 18784 |
/* XXX : Implement... */ |
| 18785 |
BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); |
| 18786 |
return (FALSE); |
| 18787 |
} |
| 18788 |
|
| 18789 |
static void |
| 18790 |
ecore_reg_wr_ind(struct bxe_softc *sc, |
| 18791 |
uint32_t addr, |
| 18792 |
uint32_t val) |
| 18793 |
{ |
| 18794 |
bxe_reg_wr_ind(sc, addr, val); |
| 18795 |
} |
| 18796 |
|
| 18797 |
static void |
| 18798 |
ecore_write_dmae_phys_len(struct bxe_softc *sc, |
| 18799 |
bus_addr_t phys_addr, |
| 18800 |
uint32_t addr, |
| 18801 |
uint32_t len) |
| 18802 |
{ |
| 18803 |
bxe_write_dmae_phys_len(sc, phys_addr, addr, len); |
| 18804 |
} |
| 18805 |
|
| 18806 |
void |
| 18807 |
ecore_storm_memset_struct(struct bxe_softc *sc, |
| 18808 |
uint32_t addr, |
| 18809 |
size_t size, |
| 18810 |
uint32_t *data) |
| 18811 |
{ |
| 18812 |
uint8_t i; |
| 18813 |
for (i = 0; i < size/4; i++) { |
| 18814 |
REG_WR(sc, addr + (i * 4), data[i]); |
| 18815 |
} |
| 18816 |
} |
| 18817 |
|