/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/

/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

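/*
 * Illustrative sketch, not part of the original header: CVM_CAST64() lets a
 * caller print a pointer-sized value with a single "%llx"/"%lld" conversion
 * specifier on both 32-bit and 64-bit builds. "reg_val" and
 * read_some_register() are hypothetical names used only for this example.
 */
#if 0	/* example only; not compiled */
	unsigned long reg_val = read_some_register();	/* hypothetical */

	pr_info("reg = 0x%llx\n", CVM_CAST64(reg_val));
#endif
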
#define DRV_NAME "LiquidIO"

/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
int octeon_console_debug_enabled(u32 console);

/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);

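/*
 * Illustrative sketch, not part of the original header: a TX completion path
 * would typically accumulate counters per completed buffer and then credit
 * BQL once for the whole batch. "buf", "reqtype" and "txq" stand in for
 * whatever the completion loop provides; the loop itself is elided.
 */
#if 0	/* example only; not compiled */
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* for each completed buffer: */
	octeon_update_tx_completion_counters(buf, reqtype,
					     &pkts_compl, &bytes_compl);

	/* after the loop, report the totals once: */
	octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
#endif
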
/** Swap each 8-byte block of data between CPU and big-endian byte order. */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);	/* swap one 64-bit word in place */
		blocks--;
		data++;
	}
}

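/*
 * Illustrative sketch, not part of the original header: assuming the device
 * side produces big-endian 8-byte words, a consumer would swap a response
 * buffer in place before reading it. "resp" is a hypothetical buffer.
 */
#if 0	/* example only; not compiled */
	u64 resp[4];	/* four 8-byte blocks filled in by the device */

	octeon_swap_8B_data(resp, 4);	/* now in host byte order */
#endif
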
/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 * @returns 0 on success, 1 on failure
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		return 1;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		return 1;
	}
	oct->mmio[baridx].done = 1;

	return 0;
}

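/*
 * Illustrative sketch, not part of the original header: a probe path would
 * map a BAR, access registers through the cached mapping, and unmap it on
 * teardown. "oct" is assumed to be an already-initialized octeon_device.
 */
#if 0	/* example only; not compiled */
	if (octeon_map_pci_barx(oct, 0, 0))	/* max_map_len 0 = map it all */
		return 1;

	/* ... access registers via oct->mmio[0].hw_addr ... */

	octeon_unmap_pci_barx(oct, 0);
#endif
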
static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increment the size required if the first
			 * attempt failed.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
		free_pages(orig_ptr, get_order(size))

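/*
 * Illustrative sketch, not part of the original header: callers keep both
 * the aligned pointer and the original page address, and pass the latter
 * back at free time. All variable names here are hypothetical; the dma_addr
 * argument is unused by the free macro, so 0 is passed.
 */
#if 0	/* example only; not compiled */
	u32 alloc_size = 0;
	size_t orig_ptr = 0;
	void *vptr;

	vptr = cnnic_numa_alloc_aligned_dma(1024, &alloc_size, &orig_ptr,
					    numa_node);
	if (!vptr)
		return 1;
	/* ... use the 8-byte-aligned vptr ... */
	cnnic_free_aligned_dma(oct->pci_dev, vptr, alloc_size, orig_ptr, 0);
#endif
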
static inline void
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

static inline void
sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(waitq, &we);
	while (!atomic_read(pcond)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &we);
}

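/*
 * Illustrative sketch, not part of the original header: the atomic variant
 * pairs with atomic_set() on the waker side. "ctx" is hypothetical.
 */
#if 0	/* example only; not compiled */
	atomic_set(&ctx->cond, 1);
	wake_up(&ctx->waitq);
#endif
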
/* Gives up the CPU for a timeout period.
 * Check that the condition is not true before we go to sleep for a
 * timeout period.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

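/*
 * Illustrative sketch, not part of the original header: the timeout is in
 * jiffies, so a millisecond budget would be converted first. "ctx" is
 * hypothetical.
 */
#if 0	/* example only; not compiled */
	sleep_timeout_cond(&ctx->wait_queue, &ctx->cond,
			   msecs_to_jiffies(100));
#endif
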
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif

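/*
 * Illustrative note, not part of the original header: each macro rounds up
 * to the next multiple of its power of two, e.g. ROUNDUP8(13) == 16 and
 * ROUNDUP8(16) == 16. The masks are 32-bit constants, so these are meant
 * for 32-bit lengths.
 */
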
#endif /* _OCTEON_MAIN_H_ */