/*
 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
 *
 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
 *                    Steve Kinneberg <kinnebergsteve@acmsystems.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *    3. The name of the author may not be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* TODO List:
 * - Verify interface consistency: i.e., public functions that take a size
 *   parameter expect size to be in bytes.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/byteorder.h>

#include "csr1212.h"

/* Permitted key type for each key id */
#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
static const u8 csr1212_key_id_type_map[0x30] = {
	__C,			/* used by Apple iSight */
	__D | __L,		/* Descriptor */
	__I | __D | __L,	/* Bus_Dependent_Info */
	__I | __D | __L,	/* Vendor */
	__I,			/* Hardware_Version */
	0, 0,			/* Reserved */
	__D | __L | __I,	/* Module */
	__I, 0, 0, 0,		/* used by Apple iSight, Reserved */
	__I,			/* Node_Capabilities */
	__L,			/* EUI_64 */
	0, 0, 0,		/* Reserved */
	__D,			/* Unit */
	__I,			/* Specifier_ID */
	__I,			/* Version */
	__I | __C | __D | __L,	/* Dependent_Info */
	__L,			/* Unit_Location */
	0,			/* Reserved */
	__I,			/* Model */
	__D,			/* Instance */
	__L,			/* Keyword */
	__D,			/* Feature */
	__L,			/* Extended_ROM */
	__I,			/* Extended_Key_Specifier_ID */
	__I,			/* Extended_Key */
	__I | __C | __D | __L,	/* Extended_Data */
	__L,			/* Modifiable_Descriptor */
	__I,			/* Directory_ID */
	__I,			/* Revision */
};
#undef __I
#undef __C
#undef __D
#undef __L
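
/*
 * csr1212_new_keyval() below consults this table to reject keyvals whose
 * type is not permitted for their key id.  For example, a Vendor entry
 * (index 0x03 above) may be an immediate, a directory or a leaf, while
 * Hardware_Version (0x04) may only be an immediate.
 */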

#define quads_to_bytes(_q) ((_q) * sizeof(u32))
#define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32))

static void free_keyval(struct csr1212_keyval *kv)
{
	if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
	    (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
		CSR1212_FREE(kv->value.leaf.data);

	CSR1212_FREE(kv);
}

static u16 csr1212_crc16(const u32 *buffer, size_t length)
{
	int shift;
	u32 data;
	u16 sum, crc = 0;

	for (; length; length--) {
		data = be32_to_cpu(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return cpu_to_be16(crc);
}
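
/*
 * This is the four-bits-per-step form of the CRC-16 that IEEE 1212 requires
 * over bus info blocks, directories and leaves (generator polynomial
 * x^16 + x^12 + x^5 + 1).  Note that the result is returned already in
 * big-endian byte order, so callers can store it directly into a config ROM
 * image without a further cpu_to_be16().
 */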

/* Microsoft computes the CRC with the bytes in reverse order. */
static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
{
	int shift;
	u32 data;
	u16 sum, crc = 0;

	for (; length; length--) {
		data = le32_to_cpu(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return cpu_to_be16(crc);
}

static struct csr1212_dentry *
csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
{
	struct csr1212_dentry *pos;

	for (pos = dir->value.directory.dentries_head;
	     pos != NULL; pos = pos->next)
		if (pos->kv == kv)
			return pos;
	return NULL;
}

static struct csr1212_keyval *
csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
{
	struct csr1212_keyval *kv;

	for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
		if (kv->offset == offset)
			return kv;
	return NULL;
}


/* Creation Routines */

struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
				       size_t bus_info_size, void *private)
{
	struct csr1212_csr *csr;

	csr = CSR1212_MALLOC(sizeof(*csr));
	if (!csr)
		return NULL;

	csr->cache_head =
		csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
					 CSR1212_CONFIG_ROM_SPACE_SIZE);
	if (!csr->cache_head) {
		CSR1212_FREE(csr);
		return NULL;
	}

	/* The keyval key id is not used for the root node, but a valid key id
	 * that can be used for a directory needs to be passed to
	 * csr1212_new_directory(). */
	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
	if (!csr->root_kv) {
		CSR1212_FREE(csr->cache_head);
		CSR1212_FREE(csr);
		return NULL;
	}

	csr->bus_info_data = csr->cache_head->data;
	csr->bus_info_len = bus_info_size;
	csr->crc_len = bus_info_size;
	csr->ops = ops;
	csr->private = private;
	csr->cache_tail = csr->cache_head;

	return csr;
}
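
/*
 * Illustrative sketch of the creation-side API (error handling, the bus_ops
 * callbacks and the bus-info setup via csr1212_init_local_csr() are omitted,
 * and the immediate value shown is made up):
 *
 *	csr = csr1212_create_csr(ops, bus_info_size, private_data);
 *	kv = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, 0x123456);
 *	csr1212_attach_keyval_to_directory(csr->root_kv, kv);
 *	csr1212_release_keyval(kv);   (the directory keeps its own reference)
 *	csr1212_generate_csr_image(csr);
 */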

void csr1212_init_local_csr(struct csr1212_csr *csr,
			    const u32 *bus_info_data, int max_rom)
{
	static const int mr_map[] = { 4, 64, 1024, 0 };

	BUG_ON(max_rom & ~0x3);
	csr->max_rom = mr_map[max_rom];
	memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
}
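
/*
 * mr_map above translates the two-bit max_rom field taken from the bus info
 * block into a block size in bytes (4, 64 or 1024) that the rest of this file
 * uses as the config ROM read and alignment granularity; the reserved
 * encoding 3 is mapped to 0.
 */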

static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
{
	struct csr1212_keyval *kv;

	if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
		return NULL;

	kv = CSR1212_MALLOC(sizeof(*kv));
	if (!kv)
		return NULL;

	atomic_set(&kv->refcnt, 1);
	kv->key.type = type;
	kv->key.id = key;
	kv->associate = NULL;
	kv->next = NULL;
	kv->prev = NULL;
	kv->offset = 0;
	kv->valid = 0;
	return kv;
}

struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
	if (!kv)
		return NULL;

	kv->value.immediate = value;
	kv->valid = 1;
	return kv;
}

static struct csr1212_keyval *
csr1212_new_leaf(u8 key, const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
	if (!kv)
		return NULL;

	if (data_len > 0) {
		kv->value.leaf.data = CSR1212_MALLOC(data_len);
		if (!kv->value.leaf.data) {
			CSR1212_FREE(kv);
			return NULL;
		}

		if (data)
			memcpy(kv->value.leaf.data, data, data_len);
	} else {
		kv->value.leaf.data = NULL;
	}

	kv->value.leaf.len = bytes_to_quads(data_len);
	kv->offset = 0;
	kv->valid = 1;

	return kv;
}

static struct csr1212_keyval *
csr1212_new_csr_offset(u8 key, u32 csr_offset)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
	if (!kv)
		return NULL;

	kv->value.csr_offset = csr_offset;

	kv->offset = 0;
	kv->valid = 1;
	return kv;
}

struct csr1212_keyval *csr1212_new_directory(u8 key)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
	if (!kv)
		return NULL;

	kv->value.directory.len = 0;
	kv->offset = 0;
	kv->value.directory.dentries_head = NULL;
	kv->value.directory.dentries_tail = NULL;
	kv->valid = 1;
	return kv;
}

void csr1212_associate_keyval(struct csr1212_keyval *kv,
			      struct csr1212_keyval *associate)
{
	BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
	       (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
		associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
		associate->key.id < 0x30) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));

	if (kv->associate)
		csr1212_release_keyval(kv->associate);

	csr1212_keep_keyval(associate);
	kv->associate = associate;
}

static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
						struct csr1212_keyval *kv,
						bool keep_keyval)
{
	struct csr1212_dentry *dentry;

	BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);

	dentry = CSR1212_MALLOC(sizeof(*dentry));
	if (!dentry)
		return -ENOMEM;

	if (keep_keyval)
		csr1212_keep_keyval(kv);
	dentry->kv = kv;

	dentry->next = NULL;
	dentry->prev = dir->value.directory.dentries_tail;

	if (!dir->value.directory.dentries_head)
		dir->value.directory.dentries_head = dentry;

	if (dir->value.directory.dentries_tail)
		dir->value.directory.dentries_tail->next = dentry;

	dir->value.directory.dentries_tail = dentry;

	return CSR1212_SUCCESS;
}

int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
				       struct csr1212_keyval *kv)
{
	return __csr1212_attach_keyval_to_directory(dir, kv, true);
}

#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
	(&((kv)->value.leaf.data[1]))

#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
		     ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
		      CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
		     ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
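
/*
 * As the macros above imply, the first quadlet of a descriptor leaf packs the
 * descriptor_type into the high bits (CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) and
 * the 24-bit specifier_ID into the low bits; the descriptor payload itself
 * begins at data[1], which is what CSR1212_DESCRIPTOR_LEAF_DATA() yields.
 */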

static struct csr1212_keyval *
csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
			    const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
			      data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	kmemcheck_annotate_variable(kv->value.leaf.data[0]);
	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);

	if (data)
		memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);

	return kv;
}

/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
					/*  1   2   4   8  16  32  64  128 */
		128,			/* --, --, --, --, --, --, --, 07, */
		4 + 16 + 32,		/* --, --, 0a, --, 0C, 0D, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		255 - 8 - 16,		/* 20, 21, 22, --, --, 25, 26, 27, */
		255,			/* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
		255,			/* 30, 31, 32, 33, 34, 35, 36, 37, */
		255,			/* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
		255,			/* 40, 41, 42, 43, 44, 45, 46, 47, */
		255,			/* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
		255,			/* 50, 51, 52, 53, 54, 55, 56, 57, */
		1 + 2 + 4 + 128,	/* 58, 59, 5a, --, --, --, --, 5f, */
		255 - 1,		/* --, 61, 62, 63, 64, 65, 66, 67, */
		255,			/* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
		255,			/* 70, 71, 72, 73, 74, 75, 76, 77, */
		1 + 2 + 4,		/* 78, 79, 7a, --, --, --, --, --, */
	};
	int i, j;

	for (; *s; s++) {
		i = *s >> 3;		/* i = *s / 8;		*/
		j = 1 << (*s & 7);	/* j = 1 << (*s % 8);	*/

		if (i >= ARRAY_SIZE(minimal_ascii_table) ||
		    !(minimal_ascii_table[i] & j))
			return -EINVAL;
	}
	return 0;
}

/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	struct csr1212_keyval *kv;
	u32 *text;
	size_t str_len, quads;

	if (!s || !*s || csr1212_check_minimal_ascii(s))
		return NULL;

	str_len = strlen(s);
	quads = bytes_to_quads(str_len);
	kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
				      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	kv->value.leaf.data[1] = 0;	/* width, character_set, language */
	text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
	text[quads - 1] = 0;		/* padding */
	memcpy(text, s, str_len);

	return kv;
}
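
/*
 * The quadlet cleared above packs the width, character_set and language
 * fields of a textual descriptor; leaving all three zero selects fixed
 * one-byte characters, minimal ASCII and English, which is what the clause
 * cited above prescribes for this helper.
 */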


/* Destruction Routines */

void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
					  struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
		return;

	dentry = csr1212_find_keyval(dir, kv);

	if (!dentry)
		return;

	if (dentry->prev)
		dentry->prev->next = dentry->next;
	if (dentry->next)
		dentry->next->prev = dentry->prev;
	if (dir->value.directory.dentries_head == dentry)
		dir->value.directory.dentries_head = dentry->next;
	if (dir->value.directory.dentries_tail == dentry)
		dir->value.directory.dentries_tail = dentry->prev;

	CSR1212_FREE(dentry);

	csr1212_release_keyval(kv);
}

/* This function is used to free the memory taken by a keyval.  If the given
 * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if no one holds a reference on them.  By means of
 * list manipulation, this routine will descend a directory structure in a
 * non-recursive manner. */
void csr1212_release_keyval(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *k, *a;
	struct csr1212_dentry dentry;
	struct csr1212_dentry *head, *tail;

	if (!atomic_dec_and_test(&kv->refcnt))
		return;

	dentry.kv = kv;
	dentry.next = NULL;
	dentry.prev = NULL;

	head = &dentry;
	tail = head;

	while (head) {
		k = head->kv;

		while (k) {
			/* must not dec_and_test kv->refcnt again */
			if (k != kv && !atomic_dec_and_test(&k->refcnt))
				break;

			a = k->associate;

			if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
				/* If the current entry is a directory, move all
				 * the entries to the destruction list. */
				if (k->value.directory.dentries_head) {
					tail->next =
					    k->value.directory.dentries_head;
					k->value.directory.dentries_head->prev =
					    tail;
					tail = k->value.directory.dentries_tail;
				}
			}
			free_keyval(k);
			k = a;
		}

		head = head->next;
		if (head) {
			if (head->prev && head->prev != &dentry)
				CSR1212_FREE(head->prev);
			head->prev = NULL;
		} else if (tail != &dentry) {
			CSR1212_FREE(tail);
		}
	}
}

void csr1212_destroy_csr(struct csr1212_csr *csr)
{
	struct csr1212_csr_rom_cache *c, *oc;
	struct csr1212_cache_region *cr, *ocr;

	csr1212_release_keyval(csr->root_kv);

	c = csr->cache_head;
	while (c) {
		oc = c;
		cr = c->filled_head;
		while (cr) {
			ocr = cr;
			cr = cr->next;
			CSR1212_FREE(ocr);
		}
		c = c->next;
		CSR1212_FREE(oc);
	}

	CSR1212_FREE(csr);
}


/* CSR Image Creation */

static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
{
	struct csr1212_csr_rom_cache *cache;
	u64 csr_addr;

	BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
	       !csr->ops->release_addr || csr->max_rom < 1);

	/* ROM size must be a multiple of csr->max_rom */
	romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);

	csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
						 csr->private);
	if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
		return -ENOMEM;

	if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
		/* Invalid address returned from allocate_addr_range(). */
		csr->ops->release_addr(csr_addr, csr->private);
		return -ENOMEM;
	}

	cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
					 romsize);
	if (!cache) {
		csr->ops->release_addr(csr_addr, csr->private);
		return -ENOMEM;
	}

	cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
					    CSR1212_KV_ID_EXTENDED_ROM);
	if (!cache->ext_rom) {
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return -ENOMEM;
	}

	if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
	    CSR1212_SUCCESS) {
		csr1212_release_keyval(cache->ext_rom);
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return -ENOMEM;
	}
	cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
	cache->ext_rom->value.leaf.len = -1;
	cache->ext_rom->value.leaf.data = cache->data;

	/* Add cache to tail of cache list */
	cache->prev = csr->cache_tail;
	csr->cache_tail->next = cache;
	csr->cache_tail = cache;
	return CSR1212_SUCCESS;
}

static void csr1212_remove_cache(struct csr1212_csr *csr,
				 struct csr1212_csr_rom_cache *cache)
{
	if (csr->cache_head == cache)
		csr->cache_head = cache->next;
	if (csr->cache_tail == cache)
		csr->cache_tail = cache->prev;

	if (cache->prev)
		cache->prev->next = cache->next;
	if (cache->next)
		cache->next->prev = cache->prev;

	if (cache->ext_rom) {
		csr1212_detach_keyval_from_directory(csr->root_kv,
						     cache->ext_rom);
		csr1212_release_keyval(cache->ext_rom);
	}

	CSR1212_FREE(cache);
}

static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
					  struct csr1212_keyval **layout_tail)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *dkv;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int num_entries = 0;

	for (dentry = dir->value.directory.dentries_head; dentry;
	     dentry = dentry->next) {
		for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
			/* Special Case: Extended Key Specifier_ID */
			if (dkv->key.id ==
			    CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL)
					last_extkey_spec = dkv;
				else if (dkv->value.immediate !=
					 last_extkey_spec->value.immediate)
					last_extkey_spec = dkv;
				else
					continue;
			/* Special Case: Extended Key */
			} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL)
					last_extkey = dkv;
				else if (dkv->value.immediate !=
					 last_extkey->value.immediate)
					last_extkey = dkv;
				else
					continue;
			}

			num_entries += 1;

			switch (dkv->key.type) {
			default:
			case CSR1212_KV_TYPE_IMMEDIATE:
			case CSR1212_KV_TYPE_CSR_OFFSET:
				break;
			case CSR1212_KV_TYPE_LEAF:
			case CSR1212_KV_TYPE_DIRECTORY:
				/* Remove from list */
				if (dkv->prev && (dkv->prev->next == dkv))
					dkv->prev->next = dkv->next;
				if (dkv->next && (dkv->next->prev == dkv))
					dkv->next->prev = dkv->prev;
				//if (dkv == *layout_tail)
				//	*layout_tail = dkv->prev;

				/* Special case: Extended ROM leafs */
				if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
					dkv->value.leaf.len = -1;
					/* Don't add Extended ROM leafs in the
					 * layout list, they are handled
					 * differently. */
					break;
				}

				/* Add to tail of list */
				dkv->next = NULL;
				dkv->prev = *layout_tail;
				(*layout_tail)->next = dkv;
				*layout_tail = dkv;
				break;
			}
		}
	}
	return num_entries;
}

static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *ltail = kv;
	size_t agg_size = 0;

	while (kv) {
		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.leaf.len + 1;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv->value.directory.len =
				csr1212_generate_layout_subdir(kv, &ltail);
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.directory.len + 1;
			break;
		}
		kv = kv->next;
	}
	return quads_to_bytes(agg_size);
}

static struct csr1212_keyval *
csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
			   struct csr1212_keyval *start_kv, int start_pos)
{
	struct csr1212_keyval *kv = start_kv;
	struct csr1212_keyval *okv = start_kv;
	int pos = start_pos;
	int kv_len = 0, okv_len = 0;

	cache->layout_head = kv;

	while (kv && pos < cache->size) {
		/* Special case: Extended ROM leafs */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			kv->offset = cache->offset + pos;

		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			kv_len = kv->value.leaf.len;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv_len = kv->value.directory.len;
			break;

		default:
			/* Should never get here */
			WARN_ON(1);
			break;
		}

		pos += quads_to_bytes(kv_len + 1);

		if (pos <= cache->size) {
			okv = kv;
			okv_len = kv_len;
			kv = kv->next;
		}
	}

	cache->layout_tail = okv;
	cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);

	return kv;
}

#define CSR1212_KV_KEY_SHIFT		24
#define CSR1212_KV_KEY_TYPE_SHIFT	6
#define CSR1212_KV_KEY_ID_MASK		0x3f
#define CSR1212_KV_KEY_TYPE_MASK	0x3	/* after shift */
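
/*
 * A directory entry quadlet, as encoded and decoded with the shifts and masks
 * above: the key occupies the top byte (a 2-bit type above a 6-bit id) and
 * the value the remaining 24 bits:
 *
 *	 31  30 29      24 23                        0
 *	+------+----------+--------------------------+
 *	| type |  key id  |  value / offset (quads)  |
 *	+------+----------+--------------------------+
 *
 * For leaf and directory entries the value is the offset of the referenced
 * block, in quadlets, relative to the entry's own position (see
 * csr1212_generate_tree_subdir() below).
 */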

static void
csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int index = 0;

	for (dentry = dir->value.directory.dentries_head;
	     dentry;
	     dentry = dentry->next) {
		struct csr1212_keyval *a;

		for (a = dentry->kv; a; a = a->associate) {
			u32 value = 0;

			/* Special Case: Extended Key Specifier_ID */
			if (a->key.id ==
			    CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL)
					last_extkey_spec = a;
				else if (a->value.immediate !=
					 last_extkey_spec->value.immediate)
					last_extkey_spec = a;
				else
					continue;

			/* Special Case: Extended Key */
			} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL)
					last_extkey = a;
				else if (a->value.immediate !=
					 last_extkey->value.immediate)
					last_extkey = a;
				else
					continue;
			}

			switch (a->key.type) {
			case CSR1212_KV_TYPE_IMMEDIATE:
				value = a->value.immediate;
				break;
			case CSR1212_KV_TYPE_CSR_OFFSET:
				value = a->value.csr_offset;
				break;
			case CSR1212_KV_TYPE_LEAF:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);
				break;
			case CSR1212_KV_TYPE_DIRECTORY:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);
				break;
			default:
				/* Should never get here */
				WARN_ON(1);
				break;
			}

			value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
				 CSR1212_KV_KEY_SHIFT;
			value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
				 (CSR1212_KV_KEY_SHIFT +
				  CSR1212_KV_KEY_TYPE_SHIFT);
			data_buffer[index] = cpu_to_be32(value);
			index++;
		}
	}
}

struct csr1212_keyval_img {
	u16 length;
	u16 crc;

	/* Must be last */
	u32 data[0];	/* older gcc can't handle [] which is standard */
};
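
/*
 * This mirrors the on-the-wire header of every leaf and directory block in
 * the config ROM: a 16-bit length in quadlets followed by a 16-bit CRC over
 * the data that follows, both stored big-endian (see csr1212_fill_cache()
 * below, which sets length with cpu_to_be16() and crc via csr1212_crc16()).
 */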

static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval *kv, *nkv;
	struct csr1212_keyval_img *kvi;

	for (kv = cache->layout_head;
	     kv != cache->layout_tail->next;
	     kv = nkv) {
		kvi = (struct csr1212_keyval_img *)(cache->data +
				bytes_to_quads(kv->offset - cache->offset));
		switch (kv->key.type) {
		default:
		case CSR1212_KV_TYPE_IMMEDIATE:
		case CSR1212_KV_TYPE_CSR_OFFSET:
			/* Should never get here */
			WARN_ON(1);
			break;

		case CSR1212_KV_TYPE_LEAF:
			/* Don't copy over Extended ROM areas, they are
			 * already filled out! */
			if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
				memcpy(kvi->data, kv->value.leaf.data,
				       quads_to_bytes(kv->value.leaf.len));

			kvi->length = cpu_to_be16(kv->value.leaf.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			csr1212_generate_tree_subdir(kv, kvi->data);

			kvi->length = cpu_to_be16(kv->value.directory.len);
			kvi->crc = csr1212_crc16(kvi->data,
						 kv->value.directory.len);
			break;
		}

		nkv = kv->next;
		if (kv->prev)
			kv->prev->next = NULL;
		if (kv->next)
			kv->next->prev = NULL;
		kv->prev = NULL;
		kv->next = NULL;
	}
}

/* This size is arbitrarily chosen.
 * The struct overhead is subtracted for more economic allocations. */
#define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))

int csr1212_generate_csr_image(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_csr_rom_cache *cache;
	struct csr1212_keyval *kv;
	size_t agg_size;
	int ret;
	int init_offset;

	BUG_ON(!csr);

	cache = csr->cache_head;

	bi = (struct csr1212_bus_info_block_img *)cache->data;

	bi->length = bytes_to_quads(csr->bus_info_len) - 1;
	bi->crc_length = bi->length;
	bi->crc = csr1212_crc16(bi->data, bi->crc_length);

	csr->root_kv->next = NULL;
	csr->root_kv->prev = NULL;

	agg_size = csr1212_generate_layout_order(csr->root_kv);

	init_offset = csr->bus_info_len;

	for (kv = csr->root_kv, cache = csr->cache_head;
	     kv;
	     cache = cache->next) {
		if (!cache) {
			/* Estimate approximate number of additional cache
			 * regions needed (it assumes that the cache holding
			 * the first 1K Config ROM space always exists). */
			int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
						(2 * sizeof(u32))) + 1;

			/* Add additional cache regions, extras will be
			 * removed later */
			for (; est_c; est_c--) {
				ret = csr1212_append_new_cache(csr,
						CSR1212_EXTENDED_ROM_SIZE);
				if (ret != CSR1212_SUCCESS)
					return ret;
			}
			/* Need to re-layout for additional cache regions */
			agg_size = csr1212_generate_layout_order(csr->root_kv);
			kv = csr->root_kv;
			cache = csr->cache_head;
			init_offset = csr->bus_info_len;
		}
		kv = csr1212_generate_positions(cache, kv, init_offset);
		agg_size -= cache->len;
		init_offset = sizeof(u32);
	}

	/* Remove unused, excess cache regions */
	while (cache) {
		struct csr1212_csr_rom_cache *oc = cache;

		cache = cache->next;
		csr1212_remove_cache(csr, oc);
	}

	/* Go through the list backward so that when done, the correct CRC
	 * will be calculated for the Extended ROM areas. */
	for (cache = csr->cache_tail; cache; cache = cache->prev) {
		/* Only Extended ROM caches should have this set. */
		if (cache->ext_rom) {
			int leaf_size;

			/* Make sure the Extended ROM leaf is a multiple of
			 * max_rom in size. */
			BUG_ON(csr->max_rom < 1);
			leaf_size = (cache->len + (csr->max_rom - 1)) &
				~(csr->max_rom - 1);

			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       leaf_size - cache->len);

			/* Subtract leaf header */
			leaf_size -= sizeof(u32);

			/* Update the Extended ROM leaf length */
			cache->ext_rom->value.leaf.len =
				bytes_to_quads(leaf_size);
		} else {
			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       cache->size - cache->len);
		}

		/* Copy the data into the cache buffer */
		csr1212_fill_cache(cache);

		if (cache != csr->cache_head) {
			/* Set the length and CRC of the extended ROM. */
			struct csr1212_keyval_img *kvi =
				(struct csr1212_keyval_img *)cache->data;
			u16 len = bytes_to_quads(cache->len) - 1;

			kvi->length = cpu_to_be16(len);
			kvi->crc = csr1212_crc16(kvi->data, len);
		}
	}

	return CSR1212_SUCCESS;
}
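
/*
 * Summary of the image generation above: csr1212_generate_layout_order()
 * flattens the keyval tree into a layout list and computes directory entry
 * counts, csr1212_generate_positions() then assigns each leaf and directory
 * an offset within a cache region (extra extended-ROM caches are appended
 * when the first region overflows), and csr1212_fill_cache() finally
 * serializes every block with its length/CRC header using
 * csr1212_generate_tree_subdir() and csr1212_crc16().
 */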

int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
{
	struct csr1212_csr_rom_cache *cache;

	for (cache = csr->cache_head; cache; cache = cache->next)
		if (offset >= cache->offset &&
		    (offset + len) <= (cache->offset + cache->size)) {
			memcpy(buffer, &cache->data[
					bytes_to_quads(offset - cache->offset)],
			       len);
			return CSR1212_SUCCESS;
		}

	return -ENOENT;
}


/*
 * Apparently there are many different wrong implementations of the CRC
 * algorithm.  We don't fail, we just warn... approximately once per GUID.
 */
static void
csr1212_check_crc(const u32 *buffer, size_t length, u16 crc, __be32 *guid)
{
	static u64 last_bad_eui64;
	u64 eui64 = ((u64)be32_to_cpu(guid[0]) << 32) | be32_to_cpu(guid[1]);

	if (csr1212_crc16(buffer, length) == crc ||
	    csr1212_msft_crc16(buffer, length) == crc ||
	    eui64 == last_bad_eui64)
		return;

	printk(KERN_DEBUG "ieee1394: config ROM CRC error\n");
	last_bad_eui64 = eui64;
}

/* Parse a chunk of data as a Config ROM */

static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_cache_region *cr;
	int i;
	int ret;

	for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
			&csr->cache_head->data[bytes_to_quads(i)],
			csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;

		/* check ROM header's info_length */
		if (i == 0 &&
		    be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
		    bytes_to_quads(csr->bus_info_len) - 1)
			return -EINVAL;
	}

	bi = (struct csr1212_bus_info_block_img *)csr->cache_head->data;
	csr->crc_len = quads_to_bytes(bi->crc_length);

	/* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
	 * is not always the case, so read the rest of the crc area 1 quadlet at
	 * a time. */
	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
			&csr->cache_head->data[bytes_to_quads(i)],
			csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;
	}

	csr1212_check_crc(bi->data, bi->crc_length, bi->crc,
			  &csr->bus_info_data[3]);

	cr = CSR1212_MALLOC(sizeof(*cr));
	if (!cr)
		return -ENOMEM;

	cr->next = NULL;
	cr->prev = NULL;
	cr->offset_start = 0;
	cr->offset_end = csr->crc_len + 4;

	csr->cache_head->filled_head = cr;
	csr->cache_head->filled_tail = cr;

	return CSR1212_SUCCESS;
}

#define CSR1212_KV_KEY(q)	(be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
#define CSR1212_KV_KEY_TYPE(q)	(CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
#define CSR1212_KV_KEY_ID(q)	(CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
#define CSR1212_KV_VAL_MASK	0xffffff
#define CSR1212_KV_VAL(q)	(be32_to_cpu(q) & CSR1212_KV_VAL_MASK)

static int
csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
{
	int ret = CSR1212_SUCCESS;
	struct csr1212_keyval *k = NULL;
	u32 offset;
	bool keep_keyval = true;

	switch (CSR1212_KV_KEY_TYPE(ki)) {
	case CSR1212_KV_TYPE_IMMEDIATE:
		k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
					  CSR1212_KV_VAL(ki));
		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		/* Don't keep local reference when parsing. */
		keep_keyval = false;
		break;

	case CSR1212_KV_TYPE_CSR_OFFSET:
		k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
					   CSR1212_KV_VAL(ki));
		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		/* Don't keep local reference when parsing. */
		keep_keyval = false;
		break;

	default:
		/* Compute the offset from 0xffff f000 0000. */
		offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
		if (offset == kv_pos) {
			/* Uh-oh.  Can't have a relative offset of 0 for Leaves
			 * or Directories.  The Config ROM image is most likely
			 * messed up, so we'll just abort here. */
			ret = -EIO;
			goto out;
		}

		k = csr1212_find_keyval_offset(dir, offset);

		if (k)
			break;		/* Found it. */

		if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
			k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
		else
			k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);

		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		/* Don't keep local reference when parsing. */
		keep_keyval = false;
		/* Contents not read yet so it's not valid. */
		k->valid = 0;
		k->offset = offset;

		k->prev = dir;
		k->next = dir->next;
		dir->next->prev = k;
		dir->next = k;
	}
	ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
out:
	if (ret != CSR1212_SUCCESS && k != NULL)
		free_keyval(k);
	return ret;
}

int csr1212_parse_keyval(struct csr1212_keyval *kv,
			 struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval_img *kvi;
	int i;
	int ret = CSR1212_SUCCESS;
	int kvi_len;

	kvi = (struct csr1212_keyval_img *)
		&cache->data[bytes_to_quads(kv->offset - cache->offset)];
	kvi_len = be16_to_cpu(kvi->length);

	/* GUID is wrong in here in case of extended ROM.  We don't care. */
	csr1212_check_crc(kvi->data, kvi_len, kvi->crc, &cache->data[3]);

	switch (kv->key.type) {
	case CSR1212_KV_TYPE_DIRECTORY:
		for (i = 0; i < kvi_len; i++) {
			u32 ki = kvi->data[i];

			/* Some devices put null entries in their unit
			 * directories.  If we come across such an entry,
			 * then skip it. */
			if (ki == 0x0)
				continue;
			ret = csr1212_parse_dir_entry(kv, ki,
					kv->offset + quads_to_bytes(i + 1));
		}
		kv->value.directory.len = kvi_len;
		break;

	case CSR1212_KV_TYPE_LEAF:
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			size_t size = quads_to_bytes(kvi_len);

			kv->value.leaf.data = CSR1212_MALLOC(size);
			if (!kv->value.leaf.data) {
				ret = -ENOMEM;
				goto out;
			}

			kv->value.leaf.len = kvi_len;
			memcpy(kv->value.leaf.data, kvi->data, size);
		}
		break;
	}

	kv->valid = 1;
out:
	return ret;
}

static int
csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
	struct csr1212_keyval_img *kvi = NULL;
	struct csr1212_csr_rom_cache *cache;
	int cache_index;
	u64 addr;
	u32 *cache_ptr;
	u16 kv_len = 0;

	BUG_ON(!csr || !kv || csr->max_rom < 1);

	/* First find which cache the data should be in (or go in if not read
	 * yet). */
	for (cache = csr->cache_head; cache; cache = cache->next)
		if (kv->offset >= cache->offset &&
		    kv->offset < (cache->offset + cache->size))
			break;

	if (!cache) {
		u32 q, cache_size;

		/* Only create a new cache for Extended ROM leaves. */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			return -EINVAL;

		if (csr->ops->bus_read(csr,
				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
				       &q, csr->private))
			return -EIO;

		kv->value.leaf.len = be32_to_cpu(q) >> 16;

		cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
			      (csr->max_rom - 1)) & ~(csr->max_rom - 1);

		cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
		if (!cache)
			return -ENOMEM;

		kv->value.leaf.data = &cache->data[1];
		csr->cache_tail->next = cache;
		cache->prev = csr->cache_tail;
		cache->next = NULL;
		csr->cache_tail = cache;
		cache->filled_head =
			CSR1212_MALLOC(sizeof(*cache->filled_head));
		if (!cache->filled_head)
			return -ENOMEM;

		cache->filled_head->offset_start = 0;
		cache->filled_head->offset_end = sizeof(u32);
		cache->filled_tail = cache->filled_head;
		cache->filled_head->next = NULL;
		cache->filled_head->prev = NULL;
		cache->data[0] = q;

		/* Don't read the entire extended ROM now.  Pieces of it will
		 * be read when entries inside it are read. */
		return csr1212_parse_keyval(kv, cache);
	}

	cache_index = kv->offset - cache->offset;

	/* Now search read portions of the cache to see if it is there. */
	for (cr = cache->filled_head; cr; cr = cr->next) {
		if (cache_index < cr->offset_start) {
			newcr = CSR1212_MALLOC(sizeof(*newcr));
			if (!newcr)
				return -ENOMEM;

			newcr->offset_start = cache_index & ~(csr->max_rom - 1);
			newcr->offset_end = newcr->offset_start;
			newcr->next = cr;
			newcr->prev = cr->prev;
			cr->prev = newcr;
			cr = newcr;
			break;
		} else if ((cache_index >= cr->offset_start) &&
			   (cache_index < cr->offset_end)) {
			kvi = (struct csr1212_keyval_img *)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
			break;
		} else if (cache_index == cr->offset_end) {
			break;
		}
	}

	if (!cr) {
		cr = cache->filled_tail;
		newcr = CSR1212_MALLOC(sizeof(*newcr));
		if (!newcr)
			return -ENOMEM;

		newcr->offset_start = cache_index & ~(csr->max_rom - 1);
		newcr->offset_end = newcr->offset_start;
		newcr->prev = cr;
		newcr->next = cr->next;
		cr->next = newcr;
		cr = newcr;
		cache->filled_tail = newcr;
	}

	while (!kvi || cr->offset_end < cache_index + kv_len) {
		cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
							~(csr->max_rom - 1))];

		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
			cr->offset_end) & ~(csr->max_rom - 1);

		if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private))
			return -EIO;

		cr->offset_end += csr->max_rom - (cr->offset_end &
						  (csr->max_rom - 1));

		if (!kvi && (cr->offset_end > cache_index)) {
			kvi = (struct csr1212_keyval_img *)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
		}

		if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
			/* The Leaf or Directory claims its length extends
			 * beyond the ConfigROM image region and thus beyond the
			 * end of our cache region.  Therefore, we abort now
			 * rather than seg faulting later. */
			return -EIO;
		}

		ncr = cr->next;

		if (ncr && (cr->offset_end >= ncr->offset_start)) {
			/* consolidate region entries */
			ncr->offset_start = cr->offset_start;

			if (cr->prev)
				cr->prev->next = cr->next;
			ncr->prev = cr->prev;
			if (cache->filled_head == cr)
				cache->filled_head = ncr;
			CSR1212_FREE(cr);
			cr = ncr;
		}
	}

	return csr1212_parse_keyval(kv, cache);
}

struct csr1212_keyval *
csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	if (!kv)
		return NULL;
	if (!kv->valid)
		if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
			return NULL;
	return kv;
}

int csr1212_parse_csr(struct csr1212_csr *csr)
{
	struct csr1212_dentry *dentry;
	int ret;

	BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);

	ret = csr1212_parse_bus_info_block(csr);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/*
	 * There has been a buggy firmware with bus_info_block.max_rom > 0
	 * spotted which actually only supported quadlet read requests to the
	 * config ROM.  Therefore read everything quadlet by quadlet regardless
	 * of what the bus info block says.
	 */
	csr->max_rom = 4;

	csr->cache_head->layout_head = csr->root_kv;
	csr->cache_head->layout_tail = csr->root_kv;

	csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
		csr->bus_info_len;

	csr->root_kv->valid = 0;
	csr->root_kv->next = csr->root_kv;
	csr->root_kv->prev = csr->root_kv;
	ret = csr1212_read_keyval(csr, csr->root_kv);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/* Scan through the Root directory finding all extended ROM regions
	 * and make cache regions for them */
	for (dentry = csr->root_kv->value.directory.dentries_head;
	     dentry; dentry = dentry->next) {
		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
		    !dentry->kv->valid) {
			ret = csr1212_read_keyval(csr, dentry->kv);
			if (ret != CSR1212_SUCCESS)
				return ret;
		}
	}

	return CSR1212_SUCCESS;
}
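
/*
 * Illustrative read-side flow (a sketch only, error handling omitted): a bus
 * driver creates a csr with csr1212_create_csr(), passing its bus_ops
 * (bus_read(), allocate_addr_range(), release_addr()), calls
 * csr1212_parse_csr() to pull in the remote config ROM, walks the directory
 * dentries and fetches individual leaves on demand with csr1212_get_keyval(),
 * and finally tears everything down with csr1212_destroy_csr().
 */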