/*
 * Copyright 2012 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include "fnic_io.h"
#include "fnic.h"
unsigned int trace_max_pages;
static int fnic_max_trace_entries;

static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);

static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;
/*
 * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
 *
 * Description:
 * This routine gets the next available trace buffer entry location @wr_idx
 * from the allocated trace buffer pages and gives that memory location
 * to the caller to store trace information.
 *
 * Return Value:
 * This routine returns a pointer to the next available trace entry
 * @fnic_buf_head for the caller to fill with trace information.
 */
fnic_trace_data_t *fnic_trace_get_buf(void)
{
	unsigned long fnic_buf_head;
	unsigned long flags;

	spin_lock_irqsave(&fnic_trace_lock, flags);

	/*
	 * Get next available memory location for writing trace information
	 * at @wr_idx and increment @wr_idx
	 */
	fnic_buf_head =
		fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
	fnic_trace_entries.wr_idx++;

	/*
	 * If the trace buffer is full, wrap @wr_idx back to the
	 * start of the buffer
	 */
	if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
		fnic_trace_entries.wr_idx = 0;

	/*
	 * If write index @wr_idx and read index @rd_idx are now the same,
	 * increment @rd_idx to move to the next entry in the trace buffer
	 */
	if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
		fnic_trace_entries.rd_idx++;
		if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
			fnic_trace_entries.rd_idx = 0;
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return (fnic_trace_data_t *)fnic_buf_head;
}
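
/*
 * Usage sketch (illustrative only, not part of the driver source): a caller
 * takes an entry from fnic_trace_get_buf() and fills the fields that
 * fnic_get_trace_data() later formats. The field names below are the ones
 * this file already dereferences (timestamp, fnaddr, host_no, tag, data[]);
 * the local variables and the 64-bit .val usage are assumptions for the
 * sketch, and the driver normally hides this pattern behind a tracing macro
 * defined elsewhere.
 *
 *	fnic_trace_data_t *td = fnic_trace_get_buf();
 *
 *	if (td) {
 *		td->timestamp.val = jiffies;
 *		td->fnaddr.val = (u64)(unsigned long)some_traced_function;
 *		td->host_no = host_no;
 *		td->tag = tag;
 *		td->data[0] = arg0;
 *		td->data[1] = arg1;
 *	}
 */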
/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_prt: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it into the fnic_dbgfs_t buffer. It starts at the @rd_idx
 * entry in the log and processes the log until the end of the buffer, then
 * gathers entries from the beginning of the log until the current @wr_idx
 * entry is reached.
 *
 * Return Value:
 * This routine returns the number of bytes that were dumped into the
 * fnic_dbgfs_t buffer.
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
	int rd_idx;
	int wr_idx;
	int len = 0;
	unsigned long flags;
	char str[KSYM_SYMBOL_LEN];
	struct timespec val;
	fnic_trace_data_t *tbp;

	spin_lock_irqsave(&fnic_trace_lock, flags);
	rd_idx = fnic_trace_entries.rd_idx;
	wr_idx = fnic_trace_entries.wr_idx;
	if (wr_idx < rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec(tbp->timestamp.val, &val);
			}
			/*
			 * Dump trace buffer entry to memory file
			 * and increment read index @rd_idx
			 */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				"%16llx %16llx %16llx\n", val.tv_sec,
				val.tv_nsec, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * If @rd_idx has reached the last trace entry,
			 * wrap it back to zero
			 */
			if (rd_idx > (fnic_max_trace_entries - 1))
				rd_idx = 0;
			/*
			 * Continue dumping trace buffer entries into the
			 * memory file until @rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	} else if (wr_idx > rd_idx) {
		while (1) {
			/* Start from read index @rd_idx */
			tbp = (fnic_trace_data_t *)
				fnic_trace_entries.page_offset[rd_idx];
			if (!tbp) {
				spin_unlock_irqrestore(&fnic_trace_lock, flags);
				return 0;
			}
			/* Convert function pointer to function name */
			if (sizeof(unsigned long) < 8) {
				sprint_symbol(str, tbp->fnaddr.low);
				jiffies_to_timespec(tbp->timestamp.low, &val);
			} else {
				sprint_symbol(str, tbp->fnaddr.val);
				jiffies_to_timespec(tbp->timestamp.val, &val);
			}
			/*
			 * Dump trace buffer entry to memory file
			 * and increment read index @rd_idx
			 */
			len += snprintf(fnic_dbgfs_prt->buffer + len,
				(trace_max_pages * PAGE_SIZE * 3) - len,
				"%16lu.%16lu %-50s %8x %8x %16llx %16llx "
				"%16llx %16llx %16llx\n", val.tv_sec,
				val.tv_nsec, str, tbp->host_no, tbp->tag,
				tbp->data[0], tbp->data[1], tbp->data[2],
				tbp->data[3], tbp->data[4]);
			rd_idx++;
			/*
			 * Continue dumping trace buffer entries into the
			 * memory file until @rd_idx reaches the write index
			 */
			if (rd_idx == wr_idx)
				break;
		}
	}
	spin_unlock_irqrestore(&fnic_trace_lock, flags);
	return len;
}
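
/*
 * Consumption sketch (illustrative, with assumed details): the debugfs side
 * of the driver is expected to hand this routine a fnic_dbgfs_t whose buffer
 * matches the (trace_max_pages * PAGE_SIZE * 3) bound used by the snprintf()
 * calls above. The buffer field is the one used above; the buffer_len field
 * name is an assumption for the sketch.
 *
 *	fnic_dbgfs_t *prt = kzalloc(sizeof(*prt), GFP_KERNEL);
 *
 *	if (prt) {
 *		prt->buffer = vmalloc(trace_max_pages * PAGE_SIZE * 3);
 *		if (prt->buffer) {
 *			memset(prt->buffer, 0,
 *			       trace_max_pages * PAGE_SIZE * 3);
 *			prt->buffer_len = fnic_get_trace_data(prt);
 *		}
 *	}
 */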
/*
 * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
 *
 * Description:
 * Initialize the trace buffer data structure by allocating the required
 * memory and setting the page_offset of every trace entry by adding the
 * trace entry length to the previous page_offset value.
 */
int fnic_trace_buf_init(void)
{
	unsigned long fnic_buf_head;
	int i;
	int err = 0;

	trace_max_pages = fnic_trace_max_pages;
	fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE) /
				 FNIC_ENTRY_SIZE_BYTES;

	fnic_trace_buf_p = (unsigned long)vmalloc(trace_max_pages * PAGE_SIZE);
	if (!fnic_trace_buf_p) {
		printk(KERN_ERR PFX "Failed to allocate memory "
				    "for fnic_trace_buf_p\n");
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	memset((void *)fnic_trace_buf_p, 0, trace_max_pages * PAGE_SIZE);

	fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
						 sizeof(unsigned long));
	if (!fnic_trace_entries.page_offset) {
		printk(KERN_ERR PFX "Failed to allocate memory for"
				    " page_offset\n");
		if (fnic_trace_buf_p) {
			vfree((void *)fnic_trace_buf_p);
			fnic_trace_buf_p = 0;
		}
		err = -ENOMEM;
		goto err_fnic_trace_buf_init;
	}
	memset((void *)fnic_trace_entries.page_offset, 0,
	       fnic_max_trace_entries * sizeof(unsigned long));
	fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
	fnic_buf_head = fnic_trace_buf_p;

	/*
	 * Set the page_offset field of fnic_trace_entries by calculating
	 * the memory location of every trace entry from the length of
	 * each trace entry
	 */
	for (i = 0; i < fnic_max_trace_entries; i++) {
		fnic_trace_entries.page_offset[i] = fnic_buf_head;
		fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
	}
	err = fnic_trace_debugfs_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
		goto err_fnic_trace_debugfs_init;
	}
	printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
	return err;

err_fnic_trace_debugfs_init:
	fnic_trace_free();
err_fnic_trace_buf_init:
	return err;
}
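
/*
 * Sizing example (illustrative, with assumed constants): PAGE_SIZE and
 * FNIC_ENTRY_SIZE_BYTES are defined outside this file. Assuming a 4096-byte
 * page and a 64-byte trace entry, a fnic_trace_max_pages value of 16 gives
 *
 *	fnic_max_trace_entries = (16 * 4096) / 64 = 1024
 *
 * entries, with entry i starting at fnic_trace_buf_p + (i * 64) as laid out
 * by the page_offset loop above.
 */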
/*
 * fnic_trace_free - Free memory of fnic trace data structures.
 */
void fnic_trace_free(void)
{
	fnic_tracing_enabled = 0;
	fnic_trace_debugfs_terminate();
	if (fnic_trace_entries.page_offset) {
		vfree((void *)fnic_trace_entries.page_offset);
		fnic_trace_entries.page_offset = NULL;
	}
	if (fnic_trace_buf_p) {
		vfree((void *)fnic_trace_buf_p);
		fnic_trace_buf_p = 0;
	}
	printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}