/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
/** Default configuration
 * for CN66XX OCTEON Models.
 */
static struct octeon_config default_cn66xx_conf = {
	.card_type			= LIO_210SV,
	.card_name			= LIO_210SV_NAME,

	/** IQ attributes */
	.iq				= {
		.max_iqs		= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size	=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN6XXX_DB_MIN,
		.db_timeout		= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq				= {
		.max_oqs		= CN6XXX_CFG_IO_QUEUES,
		.refill_threshold	= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time		= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr		= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports			= DEFAULT_NUM_NIC_PORTS_66XX,
	.num_def_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc				= {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp	= 0,

		/* Control queue group */
		.ctrlq_grp		= 1,
	},
};
/** Default configuration
 * for CN68XX OCTEON Model.
 */
static struct octeon_config default_cn68xx_conf = {
	.card_type			= LIO_410NV,
	.card_name			= LIO_410NV_NAME,

	/** IQ attributes */
	.iq				= {
		.max_iqs		= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size	=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN6XXX_DB_MIN,
		.db_timeout		= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq				= {
		.max_oqs		= CN6XXX_CFG_IO_QUEUES,
		.refill_threshold	= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time		= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr		= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports			= DEFAULT_NUM_NIC_PORTS_68XX,
	.num_def_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 2: Port cfg Attributes */
	.nic_if_cfg[2] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 3: Port cfg Attributes */
	.nic_if_cfg[3] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc				= {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp	= 0,

		/* Control queue group */
		.ctrlq_grp		= 1,
	},
};
/** Default configuration
 * for CN68XX OCTEON Model.
 */
static struct octeon_config default_cn68xx_210nv_conf = {
	.card_type			= LIO_210NV,
	.card_name			= LIO_210NV_NAME,

	/** IQ attributes */
	.iq				= {
		.max_iqs		= CN6XXX_CFG_IO_QUEUES,
		.pending_list_size	=
			(CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN6XXX_DB_MIN,
		.db_timeout		= CN6XXX_DB_TIMEOUT,
	},

	/** OQ attributes */
	.oq				= {
		.max_oqs		= CN6XXX_CFG_IO_QUEUES,
		.refill_threshold	= CN6XXX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN6XXX_OQ_INTR_PKT,
		.oq_intr_time		= CN6XXX_OQ_INTR_TIME,
		.pkts_per_intr		= CN6XXX_OQ_PKTSPER_INTR,
	},

	.num_nic_ports			= DEFAULT_NUM_NIC_PORTS_68XX_210NV,
	.num_def_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,
	.num_def_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,
	.def_rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN6XXX_MAX_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN6XXX_MAX_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN6XXX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc				= {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp	= 0,

		/* Control queue group */
		.ctrlq_grp		= 1,
	},
};
static struct octeon_config default_cn23xx_conf = {
	.card_type			= LIO_23XX,
	.card_name			= LIO_23XX_NAME,

	/** IQ attributes */
	.iq				= {
		.max_iqs		= CN23XX_CFG_IO_QUEUES,
		.pending_list_size	= (CN23XX_DEFAULT_IQ_DESCRIPTORS *
					   CN23XX_CFG_IO_QUEUES),
		.instr_type		= OCTEON_64BYTE_INSTR,
		.db_min			= CN23XX_DB_MIN,
		.db_timeout		= CN23XX_DB_TIMEOUT,
		.iq_intr_pkt		= CN23XX_DEF_IQ_INTR_THRESHOLD,
	},

	/** OQ attributes */
	.oq				= {
		.max_oqs		= CN23XX_CFG_IO_QUEUES,
		.pkts_per_intr		= CN23XX_OQ_PKTSPER_INTR,
		.refill_threshold	= CN23XX_OQ_REFIL_THRESHOLD,
		.oq_intr_pkt		= CN23XX_OQ_INTR_PKT,
		.oq_intr_time		= CN23XX_OQ_INTR_TIME,
	},

	.num_nic_ports			= DEFAULT_NUM_NIC_PORTS_23XX,
	.num_def_rx_descs		= CN23XX_DEFAULT_OQ_DESCRIPTORS,
	.num_def_tx_descs		= CN23XX_DEFAULT_IQ_DESCRIPTORS,
	.def_rx_buf_size		= CN23XX_OQ_BUF_SIZE,

	/* For ethernet interface 0: Port cfg Attributes */
	.nic_if_cfg[0] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN23XX_DEFAULT_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN23XX_DEFAULT_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN23XX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/* For ethernet interface 1: Port cfg Attributes */
	.nic_if_cfg[1] = {
		/* Max Txqs: Half for each of the two ports: max_iq/2 */
		.max_txqs		= MAX_TXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_txqs */
		.num_txqs		= DEF_TXQS_PER_INTF,

		/* Max Rxqs: Half for each of the two ports: max_oq/2 */
		.max_rxqs		= MAX_RXQS_PER_INTF,

		/* Actual configured value. Range could be: 1...max_rxqs */
		.num_rxqs		= DEF_RXQS_PER_INTF,

		/* Num of desc for rx rings */
		.num_rx_descs		= CN23XX_DEFAULT_OQ_DESCRIPTORS,

		/* Num of desc for tx rings */
		.num_tx_descs		= CN23XX_DEFAULT_IQ_DESCRIPTORS,

		/* SKB size. We need not change buf size even for Jumbo frames:
		 * Octeon can send jumbo frames in 4 consecutive descriptors.
		 */
		.rx_buf_size		= CN23XX_OQ_BUF_SIZE,

		.base_queue		= BASE_QUEUE_NOT_REQUESTED,
	},

	/** Miscellaneous attributes */
	.misc				= {
		/* Host driver link query interval */
		.oct_link_query_interval = 100,

		/* Octeon link query interval */
		.host_link_query_interval = 500,

		.enable_sli_oq_bp	= 0,

		/* Control queue group */
		.ctrlq_grp		= 1,
	},
};
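
/* Note (descriptive): these per-model defaults are consumed through the
 * CFG_GET_* accessors; e.g. octeon_setup_instr_queues() below reads the
 * default TX descriptor count with
 * CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf)).
 */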
static struct octeon_config_ptr {
	u32 conf_type;
} oct_conf_info[MAX_OCTEON_DEVICES] = {
	{
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	}, {
		OCTEON_CONFIG_TYPE_DEFAULT,
	},
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
	"BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
	"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
	"DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
	"INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
	"HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
	"INVALID"
};

static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
	"BASE", "NIC", "UNKNOWN"};
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];

static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
static spinlock_t octeon_devices_lock;

static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
static void oct_set_config_info(int oct_id, int conf_type)
{
	if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
		conf_type = OCTEON_CONFIG_TYPE_DEFAULT;

	oct_conf_info[oct_id].conf_type = conf_type;
}
void octeon_init_device_list(int conf_type)
{
	int i;

	memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		oct_set_config_info(i, conf_type);
	spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
					   u16 card_type)
{
	u32 oct_id = oct->octeon_id;
	void *ret = NULL;

	switch (oct_conf_info[oct_id].conf_type) {
	case OCTEON_CONFIG_TYPE_DEFAULT:
		if (oct->chip_id == OCTEON_CN66XX) {
			ret = &default_cn66xx_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_210NV)) {
			ret = &default_cn68xx_210nv_conf;
		} else if ((oct->chip_id == OCTEON_CN68XX) &&
			   (card_type == LIO_410NV)) {
			ret = &default_cn68xx_conf;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			ret = &default_cn23xx_conf;
		} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
			ret = &default_cn23xx_conf;
		}
		break;
	default:
		break;
	}

	return ret;
}
static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX:
		return lio_validate_cn6xxx_config_info(oct, conf);
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return 0;
	default:
		break;
	}

	return 1;
}
void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
{
	void *conf = NULL;

	conf = __retrieve_octeon_config_info(oct, card_type);
	if (!conf)
		return NULL;

	if (__verify_octeon_config_info(oct, conf)) {
		dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
		return NULL;
	}

	return conf;
}
char *lio_get_state_string(atomic_t *state_ptr)
{
	s32 istate = (s32)atomic_read(state_ptr);

	if (istate > OCT_DEV_STATES || istate < 0)
		return oct_dev_state_str[OCT_DEV_STATE_INVALID];
	return oct_dev_state_str[istate];
}
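
/* Usage sketch (illustrative only): callers typically log the device state
 * by name, e.g.:
 *
 *	dev_dbg(&oct->pci_dev->dev, "state: %s\n",
 *		lio_get_state_string(&oct->status));
 */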
static char *get_oct_app_string(u32 app_mode)
{
	if (app_mode <= CVM_DRV_APP_END)
		return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
	return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
void octeon_free_device_mem(struct octeon_device *oct)
{
	int i;

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (oct->io_qmask.oq & BIT_ULL(i))
			vfree(oct->droq[i]);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (oct->io_qmask.iq & BIT_ULL(i))
			vfree(oct->instr_queue[i]);
	}

	i = oct->octeon_id;
	vfree(oct);

	octeon_device[i] = NULL;
	octeon_device_count--;
}
static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
							u32 priv_size)
{
	struct octeon_device *oct;
	u8 *buf = NULL;
	u32 octdevsize = 0, configsize = 0, size;

	switch (pci_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		configsize = sizeof(struct octeon_cn6xxx);
		break;

	case OCTEON_CN23XX_PF_VID:
		configsize = sizeof(struct octeon_cn23xx_pf);
		break;

	case OCTEON_CN23XX_VF_VID:
		configsize = sizeof(struct octeon_cn23xx_vf);
		break;

	default:
		pr_err("%s: Unknown PCI Device: 0x%x\n",
		       __func__, pci_id);
		return NULL;
	}

	if (configsize & 0x7)
		configsize += (8 - (configsize & 0x7));

	octdevsize = sizeof(struct octeon_device);
	if (octdevsize & 0x7)
		octdevsize += (8 - (octdevsize & 0x7));

	if (priv_size & 0x7)
		priv_size += (8 - (priv_size & 0x7));

	size = octdevsize + priv_size + configsize +
		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);

	buf = vzalloc(size);
	if (!buf)
		return NULL;

	oct = (struct octeon_device *)buf;
	oct->priv = (void *)(buf + octdevsize);
	oct->chip = (void *)(buf + octdevsize + priv_size);
	oct->dispatch.dlist = (struct octeon_dispatch *)
		(buf + octdevsize + priv_size + configsize);

	return oct;
}
struct octeon_device *octeon_allocate_device(u32 pci_id,
					     u32 priv_size)
{
	int oct_idx = 0;
	struct octeon_device *oct = NULL;

	spin_lock(&octeon_devices_lock);

	for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
		if (!octeon_device[oct_idx])
			break;

	if (oct_idx < MAX_OCTEON_DEVICES) {
		oct = octeon_allocate_device_mem(pci_id, priv_size);
		if (oct) {
			octeon_device_count++;
			octeon_device[oct_idx] = oct;
		}
	}

	spin_unlock(&octeon_devices_lock);
	if (!oct)
		return NULL;

	spin_lock_init(&oct->pci_win_lock);
	spin_lock_init(&oct->mem_access_lock);

	oct->octeon_id = oct_idx;
	snprintf(oct->device_name, sizeof(oct->device_name),
		 "LiquidIO%d", (oct->octeon_id));

	return oct;
}
/** Register a device's bus location at initialization time.
 * @param oct   - pointer to the octeon device structure.
 * @param bus   - PCIe bus #
 * @param dev   - PCIe device #
 * @param func  - PCIe function #
 * @param is_pf - TRUE for PF, FALSE for VF
 * @return reference count of device's adapter
 */
int octeon_register_device(struct octeon_device *oct,
			   int bus, int dev, int func, int is_pf)
{
	int idx, refcount;

	oct->loc.bus = bus;
	oct->loc.dev = dev;
	oct->loc.func = func;

	oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
	atomic_set(oct->adapter_refcount, 0);

	/* Like the reference count, the f/w state is shared 'per-adapter' */
	oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id];
	atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED);

	spin_lock(&octeon_devices_lock);
	for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
		if (!octeon_device[idx]) {
			dev_err(&oct->pci_dev->dev,
				"%s: Internal driver error, missing dev",
				__func__);
			spin_unlock(&octeon_devices_lock);
			atomic_inc(oct->adapter_refcount);
			return 1; /* here, refcount is guaranteed to be 1 */
		}

		/* If another device is at same bus/dev, use its refcounter
		 * (and f/w state variable).
		 */
		if ((octeon_device[idx]->loc.bus == bus) &&
		    (octeon_device[idx]->loc.dev == dev)) {
			oct->adapter_refcount =
				octeon_device[idx]->adapter_refcount;
			oct->adapter_fw_state =
				octeon_device[idx]->adapter_fw_state;
			break;
		}
	}
	spin_unlock(&octeon_devices_lock);

	atomic_inc(oct->adapter_refcount);
	refcount = atomic_read(oct->adapter_refcount);

	dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
		oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return refcount;
}
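
/* Usage sketch (illustrative only): when a second function of the same
 * adapter (same bus/dev) registers, it adopts the first function's refcount
 * and firmware state, so the returned count reflects how many functions
 * share the adapter:
 *
 *	refcount = octeon_register_device(oct0, bus, dev, 0, 1); // -> 1
 *	refcount = octeon_register_device(oct1, bus, dev, 1, 1); // -> 2
 */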
/** Deregister a device at de-initialization time.
 * @param oct - pointer to the octeon device structure.
 * @return reference count of device's adapter
 */
int octeon_deregister_device(struct octeon_device *oct)
{
	int refcount;

	atomic_dec(oct->adapter_refcount);
	refcount = atomic_read(oct->adapter_refcount);

	dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__,
		oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);

	return refcount;
}
int
octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
{
	struct octeon_ioq_vector *ioq_vector;
	int cpu_num;
	int size;
	int i;

	size = sizeof(struct octeon_ioq_vector) * num_ioqs;

	oct->ioq_vector = vzalloc(size);
	if (!oct->ioq_vector)
		return -1;

	for (i = 0; i < num_ioqs; i++) {
		ioq_vector = &oct->ioq_vector[i];
		ioq_vector->oct_dev = oct;
		ioq_vector->iq_index = i;
		ioq_vector->droq_index = i;
		ioq_vector->mbox = oct->mbox[i];

		cpu_num = i % num_online_cpus();
		cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);

		if (oct->chip_id == OCTEON_CN23XX_PF_VID)
			ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
		else
			ioq_vector->ioq_num = i;
	}

	return 0;
}
void
octeon_free_ioq_vector(struct octeon_device *oct)
{
	vfree(oct->ioq_vector);
}
/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 iq_no = 0;
	union oct_txpciq txpciq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct))
		num_descs =
			CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
	else if (OCTEON_CN23XX_VF(oct))
		num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));

	oct->num_iqs = 0;

	oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]),
					   numa_node);
	if (!oct->instr_queue[0])
		oct->instr_queue[0] =
			vzalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[0])
		return 1;
	memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
	oct->instr_queue[0]->q_index = 0;
	oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
	oct->instr_queue[0]->ifidx = 0;
	txpciq.u64 = 0;
	txpciq.s.q_no = iq_no;
	txpciq.s.pkind = oct->pfvf_hsword.pkind;
	txpciq.s.use_qpg = 0;
	txpciq.s.qpg = 0;
	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		/* prevent memory leak */
		vfree(oct->instr_queue[0]);
		oct->instr_queue[0] = NULL;
		return 1;
	}

	oct->num_iqs++;
	return 0;
}
int octeon_setup_output_queues(struct octeon_device *oct)
{
	u32 num_descs = 0;
	u32 desc_size = 0;
	u32 oq_no = 0;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (OCTEON_CN6XXX(oct)) {
		num_descs =
			CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
		desc_size =
			CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
	} else if (OCTEON_CN23XX_PF(oct)) {
		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
	} else if (OCTEON_CN23XX_VF(oct)) {
		num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
		desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
	}

	oct->num_oqs = 0;
	oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
	if (!oct->droq[0])
		oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));
	if (!oct->droq[0])
		return 1;

	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
		vfree(oct->droq[oq_no]);
		oct->droq[oq_no] = NULL;
		return 1;
	}

	oct->num_oqs++;

	return 0;
}
int octeon_set_io_queues_off(struct octeon_device *oct)
{
	int loop = BUSY_READING_REG_VF_LOOP_COUNT;

	if (OCTEON_CN6XXX(oct)) {
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	} else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
		u32 q_no;

		/* IOQs will already be in reset.
		 * If RST bit is set, wait for quiet bit to be set.
		 * Once quiet bit is set, clear the RST bit.
		 */
		for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
			u64 reg_val = octeon_read_csr64(
				oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));

			while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
			       !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
			       loop) {
				reg_val = octeon_read_csr64(
					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				loop--;
			}
			if (!loop) {
				dev_err(&oct->pci_dev->dev,
					"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
					q_no);
				return -1;
			}

			reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
			octeon_write_csr64(oct,
					   CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					   reg_val);

			reg_val = octeon_read_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
				dev_err(&oct->pci_dev->dev,
					"unable to reset qno %u\n", q_no);
				return -1;
			}
		}
	}
	return 0;
}
void octeon_set_droq_pkt_op(struct octeon_device *oct,
			    u32 q_no,
			    u32 enable)
{
	u32 reg_val = 0;

	/* Set or clear the output-queue enable bit for this Octeon. */
	if (OCTEON_CN6XXX(oct)) {
		reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);

		if (enable)
			reg_val = reg_val | (1 << q_no);
		else
			reg_val = reg_val & (~(1 << q_no));

		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
	}
}
int octeon_init_dispatch_list(struct octeon_device *oct)
{
	u32 i;

	oct->dispatch.count = 0;

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		oct->dispatch.dlist[i].opcode = 0;
		INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
	}

	for (i = 0; i <= REQTYPE_LAST; i++)
		octeon_register_reqtype_free_fn(oct, i, NULL);

	spin_lock_init(&oct->dispatch.lock);

	return 0;
}
void octeon_delete_dispatch_list(struct octeon_device *oct)
{
	u32 i;
	struct list_head freelist, *temp, *tmp2;

	INIT_LIST_HEAD(&freelist);

	spin_lock_bh(&oct->dispatch.lock);

	for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
		struct list_head *dispatch;

		dispatch = &oct->dispatch.dlist[i].list;
		while (dispatch->next != dispatch) {
			temp = dispatch->next;
			list_move_tail(temp, &freelist);
		}

		oct->dispatch.dlist[i].opcode = 0;
	}

	oct->dispatch.count = 0;

	spin_unlock_bh(&oct->dispatch.lock);

	list_for_each_safe(temp, tmp2, &freelist) {
		list_del(temp);
		vfree(temp);
	}
}
octeon_dispatch_fn_t
octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
		    u16 subcode)
{
	u32 idx;
	struct list_head *dispatch;
	octeon_dispatch_fn_t fn = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn = ((struct octeon_dispatch *)
				      dispatch)->dispatch_fn;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);

	return fn;
}
/* octeon_register_dispatch_fn
 * Parameters:
 *   oct     - pointer to the octeon device.
 *   opcode  - opcode for which driver should call the registered function
 *   subcode - subcode for which driver should call the registered function
 *   fn      - The function to call when a packet with "opcode" arrives in
 *             octeon output queues.
 *   fn_arg  - The argument to be passed when calling function "fn".
 * Description:
 *   Registers a function and its argument to be called when a packet
 *   arrives in Octeon output queues with "opcode".
 * Locks:
 *   No locks are held.
 */
int
octeon_register_dispatch_fn(struct octeon_device *oct,
			    u16 opcode,
			    u16 subcode,
			    octeon_dispatch_fn_t fn, void *fn_arg)
{
	u32 idx;
	octeon_dispatch_fn_t pfn;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&oct->dispatch.lock);
	/* Add dispatch function to first level of lookup table */
	if (oct->dispatch.dlist[idx].opcode == 0) {
		oct->dispatch.dlist[idx].opcode = combined_opcode;
		oct->dispatch.dlist[idx].dispatch_fn = fn;
		oct->dispatch.dlist[idx].arg = fn_arg;
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
		return 0;
	}

	spin_unlock_bh(&oct->dispatch.lock);

	/* Check if there was a function already registered for this
	 * opcode/subcode.
	 */
	pfn = octeon_get_dispatch(oct, opcode, subcode);
	if (!pfn) {
		struct octeon_dispatch *dispatch;

		dev_dbg(&oct->pci_dev->dev,
			"Adding opcode to dispatch list linked list\n");
		dispatch = (struct octeon_dispatch *)
			   vmalloc(sizeof(struct octeon_dispatch));
		if (!dispatch) {
			dev_err(&oct->pci_dev->dev,
				"No memory to add dispatch function\n");
			return 1;
		}
		dispatch->opcode = combined_opcode;
		dispatch->dispatch_fn = fn;
		dispatch->arg = fn_arg;

		/* Add dispatch function to linked list of fn ptrs
		 * at the hashed index.
		 */
		spin_lock_bh(&oct->dispatch.lock);
		list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
		oct->dispatch.count++;
		spin_unlock_bh(&oct->dispatch.lock);
	} else {
		if (octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg)
			return 0;

		dev_err(&oct->pci_dev->dev,
			"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
			opcode, subcode);
		return 1;
	}

	return 0;
}
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
	u32 i;
	char app_name[16];
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct octeon_core_setup *cs = NULL;
	u32 num_nic_ports = 0;

	if (OCTEON_CN6XXX(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		num_nic_ports =
			CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));

	if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
		dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
			atomic_read(&oct->status));
		goto core_drv_init_err;
	}

	strncpy(app_name,
		get_oct_app_string(
		(u32)recv_pkt->rh.r_core_drv_init.app_mode),
		sizeof(app_name) - 1);
	oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
		oct->fw_info.max_nic_ports =
			(u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
		oct->fw_info.num_gmx_ports =
			(u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
	}

	if (oct->fw_info.max_nic_ports < num_nic_ports) {
		dev_err(&oct->pci_dev->dev,
			"Config has more ports than firmware allows (%d > %d).\n",
			num_nic_ports, oct->fw_info.max_nic_ports);
		goto core_drv_init_err;
	}
	oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
	oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
	oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;

	oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;

	for (i = 0; i < oct->num_iqs; i++)
		oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

	atomic_set(&oct->status, OCT_DEV_CORE_OK);

	cs = &core_setup[oct->octeon_id];

	if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
		dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
			(u32)sizeof(*cs),
			recv_pkt->buffer_size[0]);
	}

	memcpy(cs, get_rbd(
	       recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));

	strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
	strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
		OCT_SERIAL_LEN);

	octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));

	oct->boardinfo.major = cs->board_rev_major;
	oct->boardinfo.minor = cs->board_rev_minor;

	dev_info(&oct->pci_dev->dev,
		 "Running %s (%llu Hz)\n",
		 app_name, CVM_CAST64(cs->corefreq));

core_drv_init_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
	    (oct->io_qmask.iq & BIT_ULL(q_no)))
		return oct->instr_queue[q_no]->max_count;

	return -1;
}
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
	if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
	    (oct->io_qmask.oq & BIT_ULL(q_no)))
		return oct->droq[q_no]->max_count;

	return -1;
}
/* Returns the host firmware handshake OCTEON specific configuration */
struct octeon_config *octeon_get_conf(struct octeon_device *oct)
{
	struct octeon_config *default_oct_conf = NULL;

	/* check the OCTEON Device model & return the corresponding octeon
	 * configuration
	 */
	if (OCTEON_CN6XXX(oct)) {
		default_oct_conf =
			(struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
	} else if (OCTEON_CN23XX_PF(oct)) {
		default_oct_conf = (struct octeon_config *)
			(CHIP_CONF(oct, cn23xx_pf));
	} else if (OCTEON_CN23XX_VF(oct)) {
		default_oct_conf = (struct octeon_config *)
			(CHIP_CONF(oct, cn23xx_vf));
	}

	return default_oct_conf;
}
/* scratch register address is same in all the OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1	0x3C0

/** Get the octeon device pointer.
 * @param octeon_id - The id for which the octeon device pointer is required.
 * @return Success: Octeon device pointer.
 * @return Failure: NULL.
 */
struct octeon_device *lio_get_device(u32 octeon_id)
{
	if (octeon_id >= MAX_OCTEON_DEVICES)
		return NULL;
	else
		return octeon_device[octeon_id];
}
u64
lio_pci_readq(struct octeon_device *oct, u64 addr)
{
	u64 val64;
	unsigned long flags;
	u32 val32, addrhi;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	/* The windowed read happens when the LSB of the addr is written.
	 * So write MSB first
	 */
	addrhi = (addr >> 32);
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX) ||
	    (oct->chip_id == OCTEON_CN23XX_PF_VID))
		addrhi |= 0x00060000;
	writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);

	/* Read back to preserve ordering of writes */
	val32 = readl(oct->reg_list.pci_win_rd_addr_hi);

	writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
	val32 = readl(oct->reg_list.pci_win_rd_addr_lo);

	val64 = readq(oct->reg_list.pci_win_rd_data);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);

	return val64;
}
void lio_pci_writeq(struct octeon_device *oct,
		    u64 val,
		    u64 addr)
{
	u32 val32;
	unsigned long flags;

	spin_lock_irqsave(&oct->pci_win_lock, flags);

	writeq(addr, oct->reg_list.pci_win_wr_addr);

	/* The write happens when the LSB is written. So write MSB first. */
	writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
	/* Read the MSB to ensure ordering of writes. */
	val32 = readl(oct->reg_list.pci_win_wr_data_hi);

	writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);

	spin_unlock_irqrestore(&oct->pci_win_lock, flags);
}
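
/* Usage sketch (illustrative only): the readq/writeq pair gives windowed
 * access to Octeon-side physical addresses over PCI, e.g. the DDR controller
 * reset register that octeon_mem_access_ok() polls below:
 *
 *	u64 reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
 */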
int octeon_mem_access_ok(struct octeon_device *oct)
{
	u64 access_okay = 0;
	u64 lmc0_reset_ctl;

	/* Check to make sure a DDR interface is enabled */
	if (OCTEON_CN23XX_PF(oct)) {
		lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
		access_okay =
			(lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
	} else {
		lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
		access_okay =
			(lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
	}

	return access_okay ? 0 : 1;
}
int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
{
	int ret = 1;
	u32 ms;

	if (!timeout)
		return ret;

	for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
	     ms += HZ / 10) {
		ret = octeon_mem_access_ok(oct);

		/* wait 100 ms */
		if (ret)
			schedule_timeout_uninterruptible(HZ / 10);
	}

	return ret;
}
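
/* Usage sketch (illustrative only): callers poll with a millisecond budget;
 * a zero timeout means "wait until DDR is up":
 *
 *	u32 ddr_timeout = 10000;
 *	if (octeon_wait_for_ddr_init(oct, &ddr_timeout))
 *		dev_err(&oct->pci_dev->dev, "DDR not initialized\n");
 */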
/** Get the octeon id assigned to the octeon device passed as argument.
 * This function is exported to other modules.
 * @param dev - octeon device pointer passed as a void *.
 * @return octeon device id
 */
int lio_get_device_id(void *dev)
{
	struct octeon_device *octeon_dev = (struct octeon_device *)dev;
	u32 i;

	for (i = 0; i < MAX_OCTEON_DEVICES; i++)
		if (octeon_device[i] == octeon_dev)
			return octeon_dev->octeon_id;
	return -1;
}
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
	u64 instr_cnt;
	u32 pkts_pend;
	struct octeon_device *oct = NULL;

	/* the whole thing needs to be atomic, ideally */
	if (droq) {
		pkts_pend = (u32)atomic_read(&droq->pkts_pending);
		writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
		droq->pkt_count = pkts_pend;
		oct = droq->oct_dev;
	}
	if (iq) {
		spin_lock_bh(&iq->lock);
		writel(iq->pkts_processed, iq->inst_cnt_reg);
		iq->pkt_in_done -= iq->pkts_processed;
		iq->pkts_processed = 0;
		/* this write needs to be flushed before we release the lock */
		spin_unlock_bh(&iq->lock);
		oct = iq->oct_dev;
	}

	/* Write resend. Writing RESEND in SLI_PKTX_CNTS should be enough
	 * to trigger tx interrupts as well, if they are pending.
	 */
	if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
		if (droq)
			writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
		/* we race with firmware here. read and write the IN_DONE_CNTS */
		else if (iq) {
			instr_cnt = readq(iq->inst_cnt_reg);
			writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
				CN23XX_INTR_RESEND),
			       iq->inst_cnt_reg);
		}
	}
}