/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};
enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
enum {
	CMD_STATUS_SUCCESS	= 0,
};
enum {
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,
};
enum {
	MLX5_EQ_VEC_PAGES	= 0,
	MLX5_EQ_VEC_ASYNC	= 2,
	MLX5_EQ_VEC_COMP_BASE,
};
enum {
	MLX5_MAX_IRQ_NAME	= 32
};
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
enum {
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PMLP		 = 0, /* TBD */
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
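
/* The MLX5_REG_* IDs above are PRM register identifiers; they are read
 * and written through mlx5_core_access_reg(), declared later in this
 * header (see the usage sketch next to that prototype).
 */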
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
};
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};
enum mlx5_port_status {
	MLX5_PORT_UP	= 1,
	MLX5_PORT_DOWN	= 2,
};
struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;

	/*
	 * protect uuar allocation data structs
	 */
	struct mutex		lock;
};
struct mlx5_bf {
	void __iomem	       *regreg;
	struct mlx5_uar	       *uar;
	unsigned long		offset;

	/* protect blue flame buffer selection when needed
	 */
	spinlock_t		lock;

	/* serialize 64 bit writes when done as two 32 bit accesses
	 */
	spinlock_t		lock32;
};
struct mlx5_cmd_first {
	__be32		data[4];
};
struct mlx5_cmd_msg {
	struct list_head	list;
	struct cache_ent       *cache;
	struct mlx5_cmd_first	first;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
};
struct cache_ent {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
};

struct cmd_msg_cache {
	struct cache_ent	large;
	struct cache_ent	med;
};
struct mlx5_cmd_stats {
	struct dentry	       *count;
	/* protect command average calculations */
	spinlock_t		lock;
};
struct mlx5_cmd {
	dma_addr_t	alloc_dma;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int		checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
};
struct mlx5_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	int			npages;
	int			size;
	u8			page_shift;
};
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	struct list_head	list;
	struct mlx5_rsc_debug  *dbg;
};
struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
};
struct mlx5_core_mkey {
	u64	iova;
	u64	size;
	u32	key;
	u32	pd;
};
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
};
struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	int			max_avail_gather;
	void (*event)		(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
	int			num_comp_vectors;
};
struct mlx5_uar {
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *bf_map;
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	struct workqueue_struct	       *wq;
	struct work_struct		work;
};
struct mlx5_cq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree
	 */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	int	enabled;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
};
struct mlx5_irq_info {
	char	name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct msix_entry      *msix_arr;
	struct mlx5_irq_info   *irq_info;
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	struct list_head	free_list;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;

	struct list_head	dev_list;
	struct list_head	ctx_list;

	struct mlx5_eswitch    *eswitch;
	struct mlx5_core_sriov	sriov;
	unsigned long		pci_dev_data;
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_root_namespace *fdb_root_ns;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_DOWN	= BIT(0),
	MLX5_INTERFACE_STATE_UP		= BIT(1),
	MLX5_INTERFACE_STATE_SHUTDOWN	= BIT(2),
};
enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile    *profile;
};
struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};
enum {
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};
enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};
struct mlx5_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	mlx5_cmd_cbk_t		callback;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
};
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
enum phy_port_state {
	MLX5_AAA_111
};
struct mlx5_hca_vport_context {
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}
extern struct workqueue_struct *mlx5_core_wq;
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
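
/* A small usage sketch for the revision helpers above: format the
 * firmware version the way mlx5 tooling usually prints it
 * (major.minor.subminor).  The helper name and buffer contract are
 * hypothetical, not part of the driver API.
 */
static inline void mlx5_example_fw_ver_str(struct mlx5_core_dev *dev,
					   char *buf, size_t len)
{
	snprintf(buf, len, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev),
		 fw_rev_sub(dev));
}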
static inline void *mlx5_vzalloc(unsigned long size)
{
	void *rtn;

	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!rtn)
		rtn = vzalloc(size);
	return rtn;
}
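
/* mlx5_vzalloc() may return either kmalloc'ed or vmalloc'ed memory, so
 * callers must release it with kvfree() (declared in <linux/mm.h>),
 * never plain kfree().  A hedged sketch with a hypothetical caller:
 *
 *	void *tbl = mlx5_vzalloc(in_sz);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */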
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
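
/* A minimal calling-convention sketch for mlx5_cmd_exec(): build a
 * big-endian inbox, execute, then translate the firmware status carried
 * in the outbox header.  This assumes the generic mlx5_inbox_hdr /
 * mlx5_outbox_hdr structs from linux/mlx5/device.h; real commands wrap
 * them in command-specific mailbox layouts, and the helper name here is
 * purely illustrative.
 */
static inline int mlx5_example_nop_cmd(struct mlx5_core_dev *dev, u16 opcode)
{
	struct mlx5_inbox_hdr in = {};
	struct mlx5_outbox_hdr out = {};
	int err;

	in.opcode = cpu_to_be16(opcode);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;	/* transport-level failure */

	return mlx5_cmd_status_to_err(&out);	/* firmware status */
}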
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
		       bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
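
/* A hedged sketch of the mlx5_buf life cycle using the helpers above:
 * allocate a contiguous buffer, address a chunk inside it via
 * mlx5_buf_offset(), and release it.  The 4096/64 sizes and the helper
 * name are arbitrary example values, not driver API.
 */
static inline int mlx5_example_buf_use(struct mlx5_core_dev *dev)
{
	struct mlx5_buf buf;
	void *slot;
	int err;

	err = mlx5_buf_alloc(dev, 4096, &buf);
	if (err)
		return err;

	slot = mlx5_buf_offset(&buf, 64);	/* second 64-byte slot */
	*(__be32 *)slot = cpu_to_be32(0);	/* e.g. start building a WQE */

	mlx5_buf_free(dev, &buf);
	return 0;
}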
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen,
			 int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  struct mlx5_create_mkey_mbox_in *in, int inlen,
			  mlx5_cmd_cbk_t callback, void *context,
			  struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
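
/* A usage sketch for mlx5_core_access_reg(): issue a read (write == 0)
 * of one of the MLX5_REG_* IDs defined near the top of this header.
 * The zeroed inbox stands in for register-specific input fields (e.g.
 * local_port), so treat the layout and the helper name as illustrative
 * only, not a definitive implementation.
 */
static inline int mlx5_example_read_reg(struct mlx5_core_dev *dev, u16 reg_num,
					void *out, int outlen)
{
	u32 in[8] = {0};	/* register-specific input, zeroed here */

	return mlx5_core_access_reg(dev, in, sizeof(in), out, outlen,
				    reg_num, 0, 0);
}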
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
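
/* Doorbell records pair mlx5_db_alloc()/mlx5_db_free(); on success,
 * db.db points at a record inside a shared page tracked by the
 * mlx5_db_pgdir machinery above.  A minimal, hypothetical round trip:
 */
static inline int mlx5_example_db_round_trip(struct mlx5_core_dev *dev)
{
	struct mlx5_db db;
	int err;

	err = mlx5_db_alloc(dev, &db);
	if (err)
		return err;

	/* ... hand db.dma / db.db to a CQ or QP context ... */

	mlx5_db_free(dev, &db);
	return 0;
}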
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
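
/* Taken together with mlx5_base_mkey(), these helpers encode the mkey
 * split used throughout the driver: the low 8 bits hold the variant
 * (key tag) and the upper 24 bits hold the index, so
 * key == mlx5_idx_to_mkey(mlx5_mkey_to_idx(key)) | mlx5_mkey_variant(key).
 */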
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};
enum {
	MAX_MR_CACHE_ENTRIES	= 16,
};
enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void *			(*get_dev)(void *context);
	int			protocol;
	struct list_head	list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
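
/* A minimal registration sketch: an upper-layer driver (IB, Ethernet)
 * hangs its per-device context off the add() return value and tears it
 * down in remove().  All names here are hypothetical.
 */
static inline void *mlx5_example_add(struct mlx5_core_dev *dev)
{
	return dev;	/* real drivers allocate a per-device context */
}

static inline void mlx5_example_remove(struct mlx5_core_dev *dev, void *context)
{
	/* free the per-device context here */
}

static inline int mlx5_example_attach(void)
{
	static struct mlx5_interface example_intf = {
		.add	  = mlx5_example_add,
		.remove	  = mlx5_example_remove,
		.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	};

	return mlx5_register_interface(&example_intf);
}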
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
enum {
	MLX5_PCI_DEV_IS_VF	= 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
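
/* The parameter is the log encoding from the HCA capabilities:
 * e.g. param == 3 yields 8 * (1 << 3) = 64 GID table entries.
 */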
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
#endif /* MLX5_DRIVER_H */