/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,
};
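
/*
 * Illustration (editorial, not part of the upstream header): the command
 * layer arms its completion timer from this constant, conventionally as
 *
 *	timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
 *
 * i.e. a 60 * 1000 = 60,000 ms budget per command.
 */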
enum {
	CMD_STATUS_SUCCESS	= 0,
};
enum {
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SYNC_UMR	= 4,
};
enum {
	MLX5_EQ_VEC_PAGES	 = 0,
	MLX5_EQ_VEC_ASYNC	 = 2,
	MLX5_EQ_VEC_PFAULT	 = 3,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME	= 32
};
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
enum {
	MLX5_REG_QPTS		 = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM		 = 0x4013,
	MLX5_REG_QCAM		 = 0x4019,
	MLX5_REG_DCBX_PARAM	 = 0x4020,
	MLX5_REG_DCBX_APP	 = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
};
enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};
enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};
enum mlx5_dct_atomic_mode {
	MLX5_ATOMIC_MODE_DCT_OFF	= 20,
	MLX5_ATOMIC_MODE_DCT_NONE	= 0 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_IB_COMP	= 1 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_CX		= 2 << MLX5_ATOMIC_MODE_DCT_OFF,
};
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
};
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
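
/*
 * Usage sketch (editorial, not part of the upstream header): these flags are
 * OR-ed together to form the "type" argument of mlx5_core_page_fault_resume(),
 * declared later in this file, e.g.:
 *
 *	mlx5_core_page_fault_resume(dev, token, wq_num,
 *				    MLX5_PAGE_FAULT_RESUME_REQUESTOR |
 *				    MLX5_PAGE_FAULT_RESUME_WRITE, error);
 */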
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[0];
};
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
	MLX5_DEV_EVENT_PPS,
	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};
enum mlx5_port_status {
	MLX5_PORT_UP        = 1,
	MLX5_PORT_DOWN      = 2,
};

enum mlx5_eq_type {
	MLX5_EQ_TYPE_COMP,
	MLX5_EQ_TYPE_ASYNC,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	MLX5_EQ_TYPE_PF,
#endif
};
struct mlx5_bfreg_info {
	/* ... */
	int			num_low_latency_bfregs;
	/* ... */

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex		lock;
	/* ... */
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
	/* ... */
};
struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	/* ... */
};
struct cmd_msg_cache {
	/* protect block chain allocations */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};
enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
	/* ... */
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};
struct mlx5_cmd {
	/* ... */
	dma_addr_t	alloc_dma;
	/* ... */

	/* protect command queue allocations */
	spinlock_t	alloc_lock;

	/* protect token allocations */
	spinlock_t	token_lock;
	/* ... */
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	/* ... */
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
	bool	has_smi;
};
struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};
struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	int			npages;
	int			size;
	u8			page_shift;
};
struct mlx5_frag_buf {
	struct mlx5_buf_list   *frags;
	int			npages;
	int			size;
	u8			page_shift;
};
struct mlx5_eq_tasklet {
	struct list_head	list;
	struct list_head	process_list;
	struct tasklet_struct	task;
	/* lock on completion tasklet list */
	spinlock_t		lock;
};
struct mlx5_eq_pagefault {
	struct work_struct	work;
	/* Pagefaults lock */
	spinlock_t		lock;
	struct workqueue_struct *wq;
	mempool_t	       *pool;
};
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;
	struct mlx5_buf		buf;
	int			size;
	unsigned int		irqn;
	u8			eqn;
	int			nent;
	u64			mask;
	struct list_head	list;
	int			index;
	struct mlx5_rsc_debug  *dbg;
	enum mlx5_eq_type	type;
	union {
		struct mlx5_eq_tasklet	tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		struct mlx5_eq_pagefault pf_ctx;
#endif
	};
};
struct mlx5_core_psv {
	u32	psv_idx;
	/* ... */
};
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};
struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};
#define MLX5_24BIT_MASK		((1 << 24) - 1)
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	u32		srqn;
	int		max;
	int		max_gs;
	int		max_avail_gather;
	int		wqe_shift;
	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t		refcount;
	struct completion	free;
};
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq		pfault_eq;
#endif
	int			num_comp_vectors;
	/* protect EQs list */
	spinlock_t		lock;
};
struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};
struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};
struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};
struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	/* ... */
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	/* ... */
	struct work_struct		work;
	struct delayed_work		recover_work;
};
struct mlx5_cq_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_qp_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_srq_table {
	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

struct mlx5_mkey_table {
	/* protect radix tree */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	enum port_state_policy	policy;
};
struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	int			enabled_vfs;
};
struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
};
struct mlx5_fc_stats {
	struct rb_root counters;
	struct list_head addlist;
	/* protect addlist add/splice operations */
	spinlock_t addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};
struct mlx5_pagefault;
struct mlx5_rl_entry {
	u32			rate;
	u16			index;
	u16			refcount;
};
struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex		rl_lock;
	u16			max_size;
	u32			max_rate;
	u32			min_rate;
	struct mlx5_rl_entry   *rl_entry;
};
enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED   = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR     = 0x3,
	MLX5_MODULE_STATUS_NUM       = 0x3,
};
enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};
struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct mlx5_irq_info	*irq_info;

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	/* ... */
	struct list_head	free_list;
	/* ... */

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	int			numa_node;

	struct mutex		pgdir_mutex;
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;

	struct list_head	dev_list;
	struct list_head	ctx_list;
	spinlock_t		ctx_lock;

	struct list_head	waiting_events_list;
	bool			is_accum_events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs	*mpfs;
	struct mlx5_eswitch	*eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	unsigned long		pci_dev_data;
	struct mlx5_fc_stats	fc_stats;
	struct mlx5_rl_table	rl_table;

	struct mlx5_port_module_event_stats  pme_stats;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	void		      (*pfault)(struct mlx5_core_dev *dev,
					void *context,
					struct mlx5_pagefault *pfault);
	void		       *pfault_ctx;
	struct srcu_struct	pfault_srcu;
#endif
	struct mlx5_bfreg_data		bfregs;
	struct mlx5_uars_page	       *uar;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};
enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};
enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	/* ... */
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * Number of resource holding WQE, depends on type.
			 */
			u32	wq_num;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			/* ... */
		} rdma;
	};

	/* ... */
	struct work_struct	work;
};
struct mlx5_td {
	struct list_head	tirs_list;
	u32			tdn;
};

struct mlx5e_resources {
	u32			pdn;
	struct mlx5_td		td;
	struct mlx5_core_mkey	mkey;
	struct mlx5_sq_bfreg	bfreg;
};
#define MLX5_MAX_RESERVED_GIDS	8
struct mlx5_rsvd_gids {
	unsigned int	start;
	unsigned int	count;
	struct ida	ida;
};
#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8			pin_caps[MAX_PIN_NUM];
	struct work_struct	out_work;
	u64			start[MAX_PIN_NUM];
	/* ... */
};
struct mlx5_clock {
	/* ... */
	struct cyclecounter	cycles;
	struct timecounter	tc;
	struct hwtstamp_config	hwtstamp_config;
	/* ... */
	unsigned long		overflow_period;
	struct delayed_work	overflow_work;
	struct mlx5_core_dev   *mdev;
	struct ptp_clock       *ptp;
	struct ptp_clock_info	ptp_info;
	struct mlx5_pps		pps_info;
};
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
	} caps;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile    *profile;
	/* ... */
	struct mlx5e_resources	mlx5e_res;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		/* ... */
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap		*rmap;
#endif
	struct mlx5_clock	 clock;
	struct mlx5_ib_clock_info  *clock_info;
	struct page		*clock_info_page;
};
struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};
enum {
	MLX5_COMP_EQ_SIZE = 1024,
};
enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};
struct mlx5_cmd_work_ent {
	/* ... */
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	/* ... */
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	/* ... */
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	/* ... */
};
enum phy_port_state {
	MLX5_AAA_111
};
struct mlx5_hca_vport_context {
	/* ... */
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	/* ... */
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	/* ... */
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	/* ... */
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
	return buf->direct.buf + offset;
}
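
/*
 * Example (editorial, not part of the upstream header): mlx5_buf_offset()
 * turns a byte offset into a kernel virtual address inside a physically
 * contiguous mlx5_buf, e.g. locating the i-th fixed-size entry of a queue
 * buffer:
 *
 *	void *entry = mlx5_buf_offset(&eq->buf, i * entry_size);
 */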
#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
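
/*
 * Example (editorial, not part of the upstream header): the three firmware
 * revision accessors above are conventionally combined into a "maj.min.sub"
 * version string, e.g. in a probe path. The helper name is hypothetical.
 */
static inline void mlx5_example_print_fw_rev(struct mlx5_core_dev *dev)
{
	dev_info(&dev->pdev->dev, "firmware version: %d.%d.%d\n",
		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
}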
static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
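
/*
 * Usage sketch (editorial, not part of the upstream header; the
 * "mlx5_example_*" name is hypothetical): command input/output buffers are
 * laid out with the MLX5_ST_SZ_DW()/MLX5_SET() macros from
 * linux/mlx5/device.h, then passed to mlx5_cmd_exec(). A minimal ENABLE_HCA
 * invocation looks like:
 */
static inline int mlx5_example_enable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	/* every command carries its opcode in the input layout */
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}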
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name,
		       enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
void mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
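
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is hypothetical): mlx5_core_access_reg() is the transport for the
 * MLX5_REG_* IDs defined near the top of this file; "arg" and "write" select
 * the register-specific argument and the access direction. Reading the
 * maximum MTU from the PMTU port register:
 */
static inline int mlx5_example_query_max_mtu(struct mlx5_core_dev *dev,
					     u8 port, u16 *max_mtu)
{
	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
	int err;

	MLX5_SET(pmtu_reg, in, local_port, port);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMTU, 0, 0);
	if (err)
		return err;

	*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
	return 0;
}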
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}
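
/*
 * Usage sketch (editorial, not part of the upstream header): boot code polls
 * this bit until firmware clears it, bounded by a deadline, e.g.:
 *
 *	unsigned long end = jiffies + msecs_to_jiffies(max_wait_ms);
 *
 *	while (fw_initializing(dev)) {
 *		if (time_after(jiffies, end))
 *			return -EBUSY;
 *		msleep(poll_interval_ms);
 *	}
 *
 * max_wait_ms and poll_interval_ms are hypothetical caller-chosen values.
 */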
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
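
/*
 * Example (editorial, not part of the upstream header; the helper name is
 * hypothetical): an mkey partitions into a 24-bit index in bits 31:8 (cf.
 * MLX5_24BIT_MASK above) and an 8-bit variant in bits 7:0, so the helpers
 * above recompose the key losslessly:
 */
static inline u32 mlx5_example_mkey_rebuild(u32 mkey)
{
	return mlx5_idx_to_mkey(mlx5_mkey_to_idx(mkey)) | mlx5_mkey_variant(mkey);
}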
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};
enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};
enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	int			(*attach)(struct mlx5_core_dev *dev, void *context);
	void			(*detach)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void			(*pfault)(struct mlx5_core_dev *dev,
					  void *context,
					  struct mlx5_pagefault *pfault);
	void *			(*get_dev)(void *context);
	int			protocol;
	struct list_head	list;
};
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
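
/*
 * Registration sketch (editorial, not part of the upstream header; the
 * "mlx5_example_*" names and callback bodies are hypothetical): a protocol
 * driver fills in struct mlx5_interface and registers it, after which the
 * core invokes ->add() for each present device.
 */
static inline void *mlx5_example_add(struct mlx5_core_dev *dev)
{
	return dev;	/* returned pointer is passed back as "context" */
}

static inline void mlx5_example_remove(struct mlx5_core_dev *dev, void *context)
{
}

static struct mlx5_interface mlx5_example_interface = {
	.add      = mlx5_example_add,
	.remove   = mlx5_example_remove,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
};

/* typically from module_init()/module_exit():
 *	mlx5_register_interface(&mlx5_example_interface);
 *	mlx5_unregister_interface(&mlx5_example_interface);
 */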
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
#ifndef CONFIG_MLX5_CORE_IPOIB
static inline
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *))
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
#else
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
void mlx5_rdma_netdev_free(struct net_device *netdev);
#endif /* CONFIG_MLX5_CORE_IPOIB */
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};
static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}
static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}
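
/*
 * Worked example (editorial, not part of the upstream header): param == 3
 * yields 8 * (1 << 3) = 64 GID entries; the largest accepted value,
 * param == 4, yields 128, and anything above that is rejected with the
 * warning above (the table length is then reported as zero).
 */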
static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}
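
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is hypothetical): rate-limit requests are gated on support and range
 * before taking a reference on a rate table entry:
 */
static inline int mlx5_example_set_rate(struct mlx5_core_dev *dev, u32 rate,
					u16 *index)
{
	if (!mlx5_rl_is_supported(dev))
		return -EOPNOTSUPP;
	if (!mlx5_rl_is_in_range(dev, rate))
		return -ERANGE;

	/* takes a reference on (or allocates) the matching table entry */
	return mlx5_rl_add_rate(dev, rate, index);
}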
static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}
static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}
static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}
static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
static inline const struct cpumask *
mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
{
	const struct cpumask *mask;
	struct irq_desc *desc;
	unsigned int irq;
	int eqn;
	int err;

	err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
	if (err)
		return NULL;

	desc = irq_to_desc(irq);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
#else
	mask = desc->irq_common_data.affinity;
#endif
	return mask;
}
1293 #endif /* MLX5_DRIVER_H */