/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_H__
#define __PVRDMA_H__

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma_ring.h"
#include "pvrdma_dev_api.h"
#include "pvrdma_verbs.h"

/* NOT the same as BIT_MASK(). */
#define PVRDMA_MASK(n) ((n << 1) - 1)
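
/*
 * Given the highest (power-of-two) flag value n, PVRDMA_MASK(n) yields
 * a mask of every flag bit up to and including n, e.g.
 * PVRDMA_MASK(0x8) == 0xf. It is used below to clamp flag words to the
 * bits the device ABI defines.
 */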

/*
 * VMware PVRDMA PCI device id.
 */
#define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820

#define PVRDMA_NUM_RING_PAGES		4
#define PVRDMA_QP_NUM_HEADER_PAGES	1
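
/*
 * The device consumes guest memory through a page directory: a root
 * table of DMA addresses pointing at the data pages (see
 * pvrdma_page_dir_init() and friends below).
 */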

struct pvrdma_dev;

struct pvrdma_page_dir {
	dma_addr_t dir_dma;
	u64 *dir;
	u64 npages;
	void **pages;
};

struct pvrdma_cq {
	struct ib_cq ibcq;
	int offset;
	spinlock_t cq_lock; /* Poll lock. */
	struct pvrdma_uar_map *uar;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring_state;
	struct pvrdma_page_dir pdir;
	u32 cq_handle;
	bool is_kernel;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_id_table {
	u32 last;
	u32 top;
	u32 max;
	u32 mask;
	spinlock_t lock; /* Table lock. */
	unsigned long *table;
};

struct pvrdma_uar_map {
	unsigned long pfn;
	void __iomem *map;
	int index;
};

struct pvrdma_uar_table {
	struct pvrdma_id_table tbl;
	int size;
};

struct pvrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct pvrdma_dev *dev;
	struct pvrdma_uar_map uar;
	u64 ctx_handle;
};

struct pvrdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u32 pd_handle;
};

struct pvrdma_mr {
	u32 mr_handle;
	u64 iova;
	u64 size;
};

struct pvrdma_user_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct pvrdma_mr mmr;
	struct pvrdma_page_dir pdir;
};

struct pvrdma_wq {
	struct pvrdma_ring *ring;
	spinlock_t lock; /* Work queue lock. */
	int wqe_cnt;
	int wqe_size;
	int max_sg;
	int offset;
};

struct pvrdma_srq {
	struct ib_srq ibsrq;
	int offset;
	spinlock_t lock; /* SRQ lock. */
	int wqe_cnt;
	int wqe_size;
	int max_gs;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring;
	struct pvrdma_page_dir pdir;
	u32 srq_handle;
	int npages;
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_qp {
	struct ib_qp ibqp;
	u32 qp_handle;
	u32 qkey;
	struct pvrdma_wq sq;
	struct pvrdma_wq rq;
	struct ib_umem *rumem;
	struct ib_umem *sumem;
	struct pvrdma_page_dir pdir;
	struct pvrdma_srq *srq;
	int npages;
	int npages_send;
	int npages_recv;
	u32 flags;
	u8 port;
	u8 state;
	bool is_kernel;
	struct mutex mutex; /* QP state mutex. */
	refcount_t refcnt;
	struct completion free;
};

struct pvrdma_dev {
	/* PCI device-related information. */
	struct ib_device ib_dev;
	struct pci_dev *pdev;
	void __iomem *regs;
	struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
	dma_addr_t dsrbase; /* Shared region base address */
	void *cmd_slot;
	void *resp_slot;
	unsigned long flags;
	struct list_head device_link;
	unsigned int dsr_version;

	/* Locking and interrupt information. */
	spinlock_t cmd_lock; /* Command lock. */
	struct semaphore cmd_sema;
	struct completion cmd_done;
	unsigned int nr_vectors;

	/* RDMA-related device information. */
	union ib_gid *sgid_tbl;
	struct pvrdma_ring_state *async_ring_state;
	struct pvrdma_page_dir async_pdir;
	struct pvrdma_ring_state *cq_ring_state;
	struct pvrdma_page_dir cq_pdir;
	struct pvrdma_cq **cq_tbl;
	spinlock_t cq_tbl_lock;
	struct pvrdma_srq **srq_tbl;
	spinlock_t srq_tbl_lock;
	struct pvrdma_qp **qp_tbl;
	spinlock_t qp_tbl_lock;
	struct pvrdma_uar_table uar_table;
	struct pvrdma_uar_map driver_uar;
	__be64 sys_image_guid;
	spinlock_t desc_lock; /* Device modification lock. */
	u32 port_cap_mask;
	struct mutex port_mutex; /* Port modification mutex. */
	bool ib_active;
	atomic_t num_qps;
	atomic_t num_cqs;
	atomic_t num_srqs;
	atomic_t num_ahs;

	/* Network device information. */
	struct net_device *netdev;
	struct notifier_block nb_netdev;
};

struct pvrdma_netdevice_work {
	struct work_struct work;
	struct net_device *event_netdev;
	unsigned long event;
};

static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct
pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct pvrdma_srq, ibsrq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
	return container_of(ibah, struct pvrdma_ah, ibah);
}

static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
	writel(cpu_to_le32(val), dev->regs + reg);
}

static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
	return le32_to_cpu(readl(dev->regs + reg));
}
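
/*
 * Doorbell writes: posting a CQ or QP number into the driver's UAR
 * page notifies the device (the offsets come from the device API
 * header).
 */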

static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}

static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
					    u64 offset)
{
	return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}
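
/*
 * The PVRDMA ABI enums mirror the IB core values, so the conversions
 * below are plain casts; flag words are clamped with PVRDMA_MASK() to
 * the bits both sides define.
 */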

static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
	return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
	return (enum ib_mtu)mtu;
}

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
					enum ib_port_state state)
{
	return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
					enum pvrdma_port_state state)
{
	return (enum ib_port_state)state;
}

static inline int ib_port_cap_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
}

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
	return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
					enum ib_port_width width)
{
	return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
					enum pvrdma_port_width width)
{
	return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
					enum ib_port_speed speed)
{
	return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
					enum pvrdma_port_speed speed)
{
	return (enum ib_port_speed)speed;
}

static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}

static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
					enum ib_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
					enum pvrdma_mig_state state)
{
	return (enum ib_mig_state)state;
}

static inline int ib_access_flags_to_pvrdma(int flags)
{
	return flags;
}

static inline int pvrdma_access_flags_to_ib(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
	return (enum pvrdma_qp_type)type;
}

static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
{
	return (enum ib_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
	return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
	return (enum ib_qp_state)state;
}

static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
	switch (op) {
	case IB_WR_RDMA_WRITE:
		return PVRDMA_WR_RDMA_WRITE;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
	case IB_WR_SEND:
		return PVRDMA_WR_SEND;
	case IB_WR_SEND_WITH_IMM:
		return PVRDMA_WR_SEND_WITH_IMM;
	case IB_WR_RDMA_READ:
		return PVRDMA_WR_RDMA_READ;
	case IB_WR_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
	case IB_WR_LSO:
		return PVRDMA_WR_LSO;
	case IB_WR_SEND_WITH_INV:
		return PVRDMA_WR_SEND_WITH_INV;
	case IB_WR_RDMA_READ_WITH_INV:
		return PVRDMA_WR_RDMA_READ_WITH_INV;
	case IB_WR_LOCAL_INV:
		return PVRDMA_WR_LOCAL_INV;
	case IB_WR_REG_MR:
		return PVRDMA_WR_FAST_REG_MR;
	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
	case IB_WR_REG_MR_INTEGRITY:
		return PVRDMA_WR_REG_SIG_MR;
	default:
		return PVRDMA_WR_ERROR;
	}
}

static inline enum ib_wc_status pvrdma_wc_status_to_ib(
					enum pvrdma_wc_status status)
{
	return (enum ib_wc_status)status;
}

static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
	switch (opcode) {
	case PVRDMA_WC_SEND:
		return IB_WC_SEND;
	case PVRDMA_WC_RDMA_WRITE:
		return IB_WC_RDMA_WRITE;
	case PVRDMA_WC_RDMA_READ:
		return IB_WC_RDMA_READ;
	case PVRDMA_WC_COMP_SWAP:
		return IB_WC_COMP_SWAP;
	case PVRDMA_WC_FETCH_ADD:
		return IB_WC_FETCH_ADD;
	case PVRDMA_WC_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case PVRDMA_WC_FAST_REG_MR:
		return IB_WC_REG_MR;
	case PVRDMA_WC_MASKED_COMP_SWAP:
		return IB_WC_MASKED_COMP_SWAP;
	case PVRDMA_WC_MASKED_FETCH_ADD:
		return IB_WC_MASKED_FETCH_ADD;
	case PVRDMA_WC_RECV:
		return IB_WC_RECV;
	case PVRDMA_WC_RECV_RDMA_WITH_IMM:
		return IB_WC_RECV_RDMA_WITH_IMM;
	default:
		return IB_WC_RECV;
	}
}

static inline int pvrdma_wc_flags_to_ib(int flags)
{
	return flags;
}

static inline int ib_send_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}
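
/* Converters between IB core and PVRDMA ABI types, implemented out of line. */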

void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
			 const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
			 const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list, int num_pages);

int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
		    union pvrdma_cmd_resp *rsp, unsigned resp_code);
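
/*
 * Sketch of a typical command-channel call (request/response layouts
 * are defined in pvrdma_dev_api.h):
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *
 *	req.query_port.hdr.cmd = PVRDMA_CMD_QUERY_PORT;
 *	req.query_port.port_num = port_num;
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
 */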

#endif /* __PVRDMA_H__ */