/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#ifndef __CPTVF_H
#define __CPTVF_H

#include <linux/list.h>
#include "cpt_common.h"

/* Default command queue length */
#define CPT_CMD_QLEN 2046
#define CPT_CMD_QCHUNK_SIZE 1023

/* Default command timeout in seconds */
#define CPT_COMMAND_TIMEOUT 4
#define CPT_TIMER_THOLD 0xFFFF
#define CPT_NUM_QS_PER_VF 1
#define CPT_INST_SIZE 64
#define CPT_NEXT_CHUNK_PTR_SIZE 8

#define CPT_VF_MSIX_VECTORS 2
#define CPT_VF_INTR_MBOX_MASK BIT(0)
#define CPT_VF_INTR_DOVF_MASK BIT(1)
#define CPT_VF_INTR_IRDE_MASK BIT(2)
#define CPT_VF_INTR_NWRP_MASK BIT(3)
#define CPT_VF_INTR_SERR_MASK BIT(4)
#define DMA_DIRECT_DIRECT 0 /* Input DIRECT, Output DIRECT */
#define DMA_GATHER_SCATTER 1

/**
 * Enumeration cpt_vf_int_vec_e
 *
 * CPT VF MSI-X Vector Enumeration
 * Enumerates the MSI-X interrupt vectors.
 */
enum cpt_vf_int_vec_e {
	CPT_VF_INT_VEC_E_MISC = 0x00,
	CPT_VF_INT_VEC_E_DONE = 0x01
};

struct command_chunk {
	u32 size; /* Chunk size, max CPT_INST_CHUNK_MAX_SIZE */
	struct hlist_node nextchunk;
};

struct command_queue {
	spinlock_t lock; /* command queue lock */
	u32 idx; /* Command queue host write idx */
	u32 nchunks; /* Number of command chunks */
	struct command_chunk *qhead; /* Command queue head, instructions
				      * are inserted here
				      */
	struct hlist_head chead;
};

struct command_qinfo {
	u32 qchunksize; /* Command queue chunk size */
	struct command_queue queue[CPT_NUM_QS_PER_VF];
};

struct pending_entry {
	u8 busy; /* Entry status (free/busy) */

	volatile u64 *completion_addr; /* Completion address */
	void (*callback)(int, void *); /* Kernel ASYNC request callback */
	void *callback_arg; /* Kernel ASYNC request callback arg */
};

struct pending_queue {
	struct pending_entry *head; /* head of the queue */
	u32 front; /* Process work from here */
	u32 rear; /* Append new work here */
	atomic64_t pending_count;
	spinlock_t lock; /* Queue lock */
};

struct pending_qinfo {
	u32 nr_queues; /* Number of queues supported */
	u32 qlen; /* Queue length */
	struct pending_queue queue[CPT_NUM_QS_PER_VF];
};

#define for_each_pending_queue(qinfo, q, i)	\
	for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
	     q = &qinfo->queue[i])

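/*
 * Illustrative use of for_each_pending_queue() (sketch only): walk every
 * pending queue of a VF, e.g. to initialise the per-queue locks. "cptvf" is
 * assumed to point at an already allocated struct cpt_vf.
 *
 *	struct pending_qinfo *pqinfo = &cptvf->pqinfo;
 *	struct pending_queue *pq;
 *	u32 i;
 *
 *	for_each_pending_queue(pqinfo, pq, i)
 *		spin_lock_init(&pq->lock);
 */
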
struct cpt_vf {
	u16 flags; /* Flags to hold device status bits */
	u8 vfid; /* Device Index 0...CPT_MAX_VF_NUM */
	u8 vftype; /* VF type of SE_TYPE(1) or AE_TYPE(1) */
	u8 vfgrp; /* VF group (0 - 8) */
	u8 node; /* Operating node: Bits (46:44) in BAR0 address */
	u8 priority; /* VF priority ring: 1-High priority round
		      * robin ring; 0-Low priority round robin ring;
		      */
	struct pci_dev *pdev; /* pci device handle */
	void __iomem *reg_base; /* Register start address */
	void *wqe_info; /* BH worker info */
	cpumask_var_t affinity_mask[CPT_VF_MSIX_VECTORS]; /* MSI-X affinity */
	/* Command and Pending queues */
	struct command_qinfo cqinfo; /* Command queue information */
	struct pending_qinfo pqinfo; /* Pending queue information */
	/* VF-PF mailbox communication */
};

int cptvf_send_vf_up(struct cpt_vf *cptvf);
int cptvf_send_vf_down(struct cpt_vf *cptvf);
int cptvf_send_vf_to_grp_msg(struct cpt_vf *cptvf);
int cptvf_send_vf_priority_msg(struct cpt_vf *cptvf);
int cptvf_send_vq_size_msg(struct cpt_vf *cptvf);
int cptvf_check_pf_ready(struct cpt_vf *cptvf);
void cptvf_handle_mbox_intr(struct cpt_vf *cptvf);
void cvm_crypto_exit(void);
int cvm_crypto_init(struct cpt_vf *cptvf);
void vq_post_process(struct cpt_vf *cptvf, u32 qno);
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val);

#endif /* __CPTVF_H */