/*
 *
 *  $Id$
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include "pvrusb2-io.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);

#define BUFFER_SIG 0x47653271

// #define SANITY_CHECK_BUFFERS

#ifdef SANITY_CHECK_BUFFERS
#define BUFFER_CHECK(bp) do { \
        if ((bp)->signature != BUFFER_SIG) { \
                pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
                           "Buffer %p is bad at %s:%d", \
                           (bp),__FILE__,__LINE__); \
                pvr2_buffer_describe(bp,"BadSig"); \
                BUG(); \
        } \
} while (0)
#else
#define BUFFER_CHECK(bp) do {} while(0)
#endif

struct pvr2_stream {
        /* Buffers queued for reading */
        struct list_head queued_list;
        unsigned int q_count;
        unsigned int q_bcount;
        /* Buffers with retrieved data */
        struct list_head ready_list;
        unsigned int r_count;
        unsigned int r_bcount;
        /* Buffers available for use */
        struct list_head idle_list;
        unsigned int i_count;
        unsigned int i_bcount;
        /* Pointers to all buffers */
        struct pvr2_buffer **buffers;
        /* Array size of buffers */
        unsigned int buffer_slot_count;
        /* Total buffers actually in circulation */
        unsigned int buffer_total_count;
        /* Designed number of buffers to be in circulation */
        unsigned int buffer_target_count;
        /* Executed when ready list becomes non-empty */
        pvr2_stream_callback callback_func;
        void *callback_data;
        /* Context for transfer endpoint */
        struct usb_device *dev;
        int endpoint;
        /* Overhead for mutex enforcement */
        spinlock_t list_lock;
        struct mutex mutex;
        /* Tracking state for tolerating errors */
        unsigned int fail_count;
        unsigned int fail_tolerance;
};

struct pvr2_buffer {
        int id;
        int signature;
        enum pvr2_buffer_state state;
        void *ptr;               /* Pointer to storage area */
        unsigned int max_count;  /* Size of storage area */
        unsigned int used_count; /* Amount of valid data in storage area */
        int status;              /* Transfer result status */
        struct pvr2_stream *stream;
        struct list_head list_overhead;
        struct urb *purb;
};

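/* Buffer lifecycle, as implemented by the helpers below: a buffer starts
   in the "none" state, sits on the idle list once initialized, moves to
   the queued list when its URB is submitted, and moves to the ready list
   when the URB completes (successfully or not).  The per-state counters
   (q_count/q_bcount, r_count/r_bcount, i_count/i_bcount) track how many
   buffers and how many bytes of capacity or data are on each list; they
   are always adjusted under list_lock. */
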
static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
{
        switch (st) {
        case pvr2_buffer_state_none: return "none";
        case pvr2_buffer_state_idle: return "idle";
        case pvr2_buffer_state_queued: return "queued";
        case pvr2_buffer_state_ready: return "ready";
        }
        return "unknown";
}

#ifdef SANITY_CHECK_BUFFERS
static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
{
        pvr2_trace(PVR2_TRACE_INFO,
                   "buffer%s%s %p state=%s id=%d status=%d"
                   " stream=%p purb=%p sig=0x%x",
                   (msg ? " " : ""),
                   (msg ? msg : ""),
                   bp,
                   (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
                   (bp ? bp->id : 0),
                   (bp ? bp->status : 0),
                   (bp ? bp->stream : NULL),
                   (bp ? bp->purb : NULL),
                   (bp ? bp->signature : 0));
}
#endif /* SANITY_CHECK_BUFFERS */

static void pvr2_buffer_remove(struct pvr2_buffer *bp)
{
        unsigned int *cnt;
        unsigned int *bcnt;
        unsigned int ccnt;
        struct pvr2_stream *sp = bp->stream;
        switch (bp->state) {
        case pvr2_buffer_state_idle:
                cnt = &sp->i_count;
                bcnt = &sp->i_bcount;
                ccnt = bp->max_count;
                break;
        case pvr2_buffer_state_queued:
                cnt = &sp->q_count;
                bcnt = &sp->q_bcount;
                ccnt = bp->max_count;
                break;
        case pvr2_buffer_state_ready:
                cnt = &sp->r_count;
                bcnt = &sp->r_bcount;
                ccnt = bp->used_count;
                break;
        default:
                return;
        }
        list_del_init(&bp->list_overhead);
        (*cnt)--;
        (*bcnt) -= ccnt;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s dec cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
        bp->state = pvr2_buffer_state_none;
}

static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
{
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_none));
        spin_lock_irqsave(&sp->list_lock,irq_flags);
        pvr2_buffer_remove(bp);
        spin_unlock_irqrestore(&sp->list_lock,irq_flags);
}

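/* Move a buffer onto the ready list.  The return value is nonzero when
   the ready list was empty before this buffer was added, so the caller
   can tell whether this completion made data newly available. */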
static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
{
        int fl;
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_ready));
        spin_lock_irqsave(&sp->list_lock,irq_flags);
        fl = (sp->r_count == 0);
        pvr2_buffer_remove(bp);
        list_add_tail(&bp->list_overhead,&sp->ready_list);
        bp->state = pvr2_buffer_state_ready;
        (sp->r_count)++;
        sp->r_bcount += bp->used_count;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s inc cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),
                   sp->r_bcount,sp->r_count);
        spin_unlock_irqrestore(&sp->list_lock,irq_flags);
        return fl;
}

static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
{
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_idle));
        spin_lock_irqsave(&sp->list_lock,irq_flags);
        pvr2_buffer_remove(bp);
        list_add_tail(&bp->list_overhead,&sp->idle_list);
        bp->state = pvr2_buffer_state_idle;
        (sp->i_count)++;
        sp->i_bcount += bp->max_count;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s inc cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),
                   sp->i_bcount,sp->i_count);
        spin_unlock_irqrestore(&sp->list_lock,irq_flags);
}

static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
{
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_queued));
        spin_lock_irqsave(&sp->list_lock,irq_flags);
        pvr2_buffer_remove(bp);
        list_add_tail(&bp->list_overhead,&sp->queued_list);
        bp->state = pvr2_buffer_state_queued;
        (sp->q_count)++;
        sp->q_bcount += bp->max_count;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s inc cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),
                   sp->q_bcount,sp->q_count);
        spin_unlock_irqrestore(&sp->list_lock,irq_flags);
}

static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
{
        if (bp->state == pvr2_buffer_state_queued) {
                usb_kill_urb(bp->purb);
        }
}

static int pvr2_buffer_init(struct pvr2_buffer *bp,
                            struct pvr2_stream *sp,
                            unsigned int id)
{
        memset(bp,0,sizeof(*bp));
        bp->signature = BUFFER_SIG;
        bp->id = id;
        pvr2_trace(PVR2_TRACE_BUF_POOL,
                   "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
        bp->stream = sp;
        bp->state = pvr2_buffer_state_none;
        INIT_LIST_HEAD(&bp->list_overhead);
        bp->purb = usb_alloc_urb(0,GFP_KERNEL);
        if (! bp->purb) return -ENOMEM;
#ifdef SANITY_CHECK_BUFFERS
        pvr2_buffer_describe(bp,"create");
#endif
        return 0;
}

static void pvr2_buffer_done(struct pvr2_buffer *bp)
{
#ifdef SANITY_CHECK_BUFFERS
        pvr2_buffer_describe(bp,"delete");
#endif
        pvr2_buffer_wipe(bp);
        pvr2_buffer_set_none(bp);
        bp->signature = 0;
        bp->stream = NULL;
        usb_free_urb(bp->purb);
        pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
                   " bufferDone %p",bp);
}

static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
{
        int ret;
        unsigned int scnt;

        /* Allocate buffers pointer array in multiples of 32 entries */
        if (cnt == sp->buffer_total_count) return 0;

        pvr2_trace(PVR2_TRACE_BUF_POOL,
                   "/*---TRACE_FLOW---*/ poolResize "
                   " stream=%p cur=%d adj=%+d",
                   sp,
                   sp->buffer_total_count,
                   cnt-sp->buffer_total_count);

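        /* Round the requested count up to the next multiple of 32 pointer
           slots; e.g. cnt == 33 reserves 64 slots, while cnt == 32 reserves
           exactly 32. */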
        scnt = cnt & ~0x1f;
        if (cnt > scnt) scnt += 0x20;

        if (cnt > sp->buffer_total_count) {
                if (scnt > sp->buffer_slot_count) {
                        struct pvr2_buffer **nb;
                        nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
                        if (!nb) return -ENOMEM;
                        if (sp->buffer_slot_count) {
                                memcpy(nb,sp->buffers,
                                       sp->buffer_slot_count * sizeof(*nb));
                                kfree(sp->buffers);
                        }
                        sp->buffers = nb;
                        sp->buffer_slot_count = scnt;
                }
                while (sp->buffer_total_count < cnt) {
                        struct pvr2_buffer *bp;
                        bp = kmalloc(sizeof(*bp),GFP_KERNEL);
                        if (!bp) return -ENOMEM;
                        ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
                        if (ret) {
                                kfree(bp);
                                return -ENOMEM;
                        }
                        sp->buffers[sp->buffer_total_count] = bp;
                        (sp->buffer_total_count)++;
                        pvr2_buffer_set_idle(bp);
                }
        } else {
                while (sp->buffer_total_count > cnt) {
                        struct pvr2_buffer *bp;
                        bp = sp->buffers[sp->buffer_total_count - 1];
                        /* Paranoia */
                        sp->buffers[sp->buffer_total_count - 1] = NULL;
                        (sp->buffer_total_count)--;
                        pvr2_buffer_done(bp);
                        kfree(bp);
                }
                if (scnt < sp->buffer_slot_count) {
                        struct pvr2_buffer **nb = NULL;
                        if (scnt) {
                                nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
                                if (!nb) return -ENOMEM;
                                memcpy(nb,sp->buffers,scnt * sizeof(*nb));
                        }
                        kfree(sp->buffers);
                        sp->buffers = nb;
                        sp->buffer_slot_count = scnt;
                }
        }
        return 0;
}

static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
{
        struct pvr2_buffer *bp;
        unsigned int cnt;

        if (sp->buffer_total_count == sp->buffer_target_count) return 0;

        pvr2_trace(PVR2_TRACE_BUF_POOL,
                   "/*---TRACE_FLOW---*/"
                   " poolCheck stream=%p cur=%d tgt=%d",
                   sp,sp->buffer_total_count,sp->buffer_target_count);

        if (sp->buffer_total_count < sp->buffer_target_count) {
                return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
        }

        cnt = 0;
        while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
                bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
                if (bp->state != pvr2_buffer_state_idle) break;
                cnt++;
        }
        if (cnt) {
                pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
        }

        return 0;
}

static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
{
        struct list_head *lp;
        struct pvr2_buffer *bp1;
        while ((lp = sp->queued_list.next) != &sp->queued_list) {
                bp1 = list_entry(lp,struct pvr2_buffer,list_overhead);
                pvr2_buffer_wipe(bp1);
                /* At this point, we should be guaranteed that no
                   completion callback may happen on this buffer.  But it's
                   possible that it might have completed after we noticed
                   it but before we wiped it.  So double check its status
                   here first. */
                if (bp1->state != pvr2_buffer_state_queued) continue;
                pvr2_buffer_set_idle(bp1);
        }
        if (sp->buffer_total_count != sp->buffer_target_count) {
                pvr2_stream_achieve_buffer_count(sp);
        }
}

static void pvr2_stream_init(struct pvr2_stream *sp)
{
        spin_lock_init(&sp->list_lock);
        mutex_init(&sp->mutex);
        INIT_LIST_HEAD(&sp->queued_list);
        INIT_LIST_HEAD(&sp->ready_list);
        INIT_LIST_HEAD(&sp->idle_list);
}

static void pvr2_stream_done(struct pvr2_stream *sp)
{
        mutex_lock(&sp->mutex); do {
                pvr2_stream_internal_flush(sp);
                pvr2_stream_buffer_count(sp,0);
        } while (0); mutex_unlock(&sp->mutex);
}

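/* URB completion handler.  A successful (or deliberately cancelled)
   transfer records the received length and resets the consecutive-failure
   counter; other errors are silently absorbed until fail_count reaches
   fail_tolerance, after which the URB status is propagated to the buffer.
   In every case the buffer is moved to the ready list and the stream
   callback, if set, is invoked. */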
static void buffer_complete(struct urb *urb)
{
        struct pvr2_buffer *bp = urb->context;
        struct pvr2_stream *sp;
        unsigned long irq_flags;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        bp->used_count = 0;
        bp->status = 0;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
                   bp,urb->status,urb->actual_length);
        spin_lock_irqsave(&sp->list_lock,irq_flags);
        if ((!(urb->status)) ||
            (urb->status == -ENOENT) ||
            (urb->status == -ECONNRESET) ||
            (urb->status == -ESHUTDOWN)) {
                bp->used_count = urb->actual_length;
                if (sp->fail_count) {
                        pvr2_trace(PVR2_TRACE_TOLERANCE,
                                   "stream %p transfer ok"
                                   " - fail count reset",sp);
                        sp->fail_count = 0;
                }
        } else if (sp->fail_count < sp->fail_tolerance) {
                // We can tolerate this error, because we're below the
                // threshold...
                (sp->fail_count)++;
                pvr2_trace(PVR2_TRACE_TOLERANCE,
                           "stream %p ignoring error %d"
                           " - fail count increased to %u",
                           sp,urb->status,sp->fail_count);
        } else {
                bp->status = urb->status;
        }
        spin_unlock_irqrestore(&sp->list_lock,irq_flags);
        pvr2_buffer_set_ready(bp);
        if (sp && sp->callback_func) {
                sp->callback_func(sp->callback_data);
        }
}

struct pvr2_stream *pvr2_stream_create(void)
{
        struct pvr2_stream *sp;
        sp = kzalloc(sizeof(*sp),GFP_KERNEL);
        if (!sp) return sp;
        pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
        pvr2_stream_init(sp);
        return sp;
}

void pvr2_stream_destroy(struct pvr2_stream *sp)
{
        if (!sp) return;
        pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
        pvr2_stream_done(sp);
        kfree(sp);
}

void pvr2_stream_setup(struct pvr2_stream *sp,
                       struct usb_device *dev,
                       int endpoint,
                       unsigned int tolerance)
{
        mutex_lock(&sp->mutex); do {
                pvr2_stream_internal_flush(sp);
                sp->dev = dev;
                sp->endpoint = endpoint;
                sp->fail_tolerance = tolerance;
        } while(0); mutex_unlock(&sp->mutex);
}

void pvr2_stream_set_callback(struct pvr2_stream *sp,
                              pvr2_stream_callback func,
                              void *data)
{
        unsigned long irq_flags;
        mutex_lock(&sp->mutex); do {
                spin_lock_irqsave(&sp->list_lock,irq_flags);
                sp->callback_data = data;
                sp->callback_func = func;
                spin_unlock_irqrestore(&sp->list_lock,irq_flags);
        } while(0); mutex_unlock(&sp->mutex);
}

/* Query / set the nominal buffer count */
int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
{
        return sp->buffer_target_count;
}

int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
{
        int ret;
        if (sp->buffer_target_count == cnt) return 0;
        mutex_lock(&sp->mutex); do {
                sp->buffer_target_count = cnt;
                ret = pvr2_stream_achieve_buffer_count(sp);
        } while(0); mutex_unlock(&sp->mutex);
        return ret;
}

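/*
 * Minimal usage sketch for the stream-level API above.  Illustrative only:
 * the surrounding driver context (my_dev, my_callback, MY_ENDPOINT, the
 * buffer count and tolerance values) is hypothetical and not part of this
 * file.
 *
 *      struct pvr2_stream *sp = pvr2_stream_create();
 *      if (!sp) return -ENOMEM;
 *      pvr2_stream_setup(sp, my_dev->usb_dev, MY_ENDPOINT, 3);
 *      pvr2_stream_set_callback(sp, my_callback, my_dev);
 *      if (pvr2_stream_set_buffer_count(sp, 32) < 0) { ... }
 *      ...
 *      pvr2_stream_kill(sp);
 *      pvr2_stream_destroy(sp);
 */
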
struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
{
        struct list_head *lp = sp->idle_list.next;
        if (lp == &sp->idle_list) return NULL;
        return list_entry(lp,struct pvr2_buffer,list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
{
        struct list_head *lp = sp->ready_list.next;
        if (lp == &sp->ready_list) return NULL;
        return list_entry(lp,struct pvr2_buffer,list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
{
        if (id < 0) return NULL;
        if (id >= sp->buffer_total_count) return NULL;
        return sp->buffers[id];
}

int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
{
        return sp->r_count;
}

void pvr2_stream_kill(struct pvr2_stream *sp)
{
        struct pvr2_buffer *bp;
        mutex_lock(&sp->mutex); do {
                pvr2_stream_internal_flush(sp);
                while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
                        pvr2_buffer_set_idle(bp);
                }
                if (sp->buffer_total_count != sp->buffer_target_count) {
                        pvr2_stream_achieve_buffer_count(sp);
                }
        } while(0); mutex_unlock(&sp->mutex);
}

int pvr2_buffer_queue(struct pvr2_buffer *bp)
{
#undef SEED_BUFFER
#ifdef SEED_BUFFER
        unsigned int idx;
        unsigned int val;
#endif
        int ret = 0;
        struct pvr2_stream *sp;
        if (!bp) return -EINVAL;
        sp = bp->stream;
        mutex_lock(&sp->mutex); do {
                pvr2_buffer_wipe(bp);
                if (!sp->dev) {
                        ret = -EIO;
                        break;
                }
                pvr2_buffer_set_queued(bp);
#ifdef SEED_BUFFER
                for (idx = 0; idx < (bp->max_count) / 4; idx++) {
                        val = bp->id << 24;
                        val |= idx;
                        ((unsigned int *)(bp->ptr))[idx] = val;
                }
#endif
                bp->status = -EINPROGRESS;
                usb_fill_bulk_urb(bp->purb,      // struct urb *urb
                                  sp->dev,       // struct usb_device *dev
                                  // endpoint (below)
                                  usb_rcvbulkpipe(sp->dev,sp->endpoint),
                                  bp->ptr,       // void *transfer_buffer
                                  bp->max_count, // int buffer_length
                                  buffer_complete,
                                  bp);
                usb_submit_urb(bp->purb,GFP_KERNEL);
        } while(0); mutex_unlock(&sp->mutex);
        return ret;
}

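/*
 * Sketch of the per-buffer cycle driven by a client of this module.
 * Illustrative only; the helper names (consume, storage, storage_size)
 * are hypothetical and not part of this file.
 *
 *      bp = pvr2_stream_get_idle_buffer(sp);
 *      pvr2_buffer_set_buffer(bp, storage, storage_size);
 *      pvr2_buffer_queue(bp);
 *
 *      // Later, from the stream callback registered with
 *      // pvr2_stream_set_callback():
 *      while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
 *              consume(pvr2_buffer_get_count(bp),
 *                      pvr2_buffer_get_status(bp));
 *              pvr2_buffer_queue(bp);  // recycle the buffer
 *      }
 */
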
int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
{
        int ret = 0;
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        if (!bp) return -EINVAL;
        sp = bp->stream;
        mutex_lock(&sp->mutex); do {
                spin_lock_irqsave(&sp->list_lock,irq_flags);
                if (bp->state != pvr2_buffer_state_idle) {
                        ret = -EPERM;
                } else {
                        bp->ptr = ptr;
                        bp->stream->i_bcount -= bp->max_count;
                        bp->max_count = cnt;
                        bp->stream->i_bcount += bp->max_count;
                        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                                   "/*---TRACE_FLOW---*/ bufferPool "
                                   " %8s cap cap=%07d cnt=%02d",
                                   pvr2_buffer_state_decode(
                                           pvr2_buffer_state_idle),
                                   bp->stream->i_bcount,bp->stream->i_count);
                }
                spin_unlock_irqrestore(&sp->list_lock,irq_flags);
        } while(0); mutex_unlock(&sp->mutex);
        return ret;
}

unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
{
        return bp->used_count;
}

int pvr2_buffer_get_status(struct pvr2_buffer *bp)
{
        return bp->status;
}

int pvr2_buffer_get_id(struct pvr2_buffer *bp)
{
        return bp->id;
}

/*
  Stuff for Emacs to see, in order to encourage consistent editing style:
  *** Local Variables: ***
  *** mode: c ***
  *** fill-column: 75 ***
  *** tab-width: 8 ***
  *** c-basic-offset: 8 ***
  *** End: ***
  */