drivers/ieee1394/csr1212.c
/*
 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
 *
 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
 *                    Steve Kinneberg <kinnebergsteve@acmsystems.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *    3. The name of the author may not be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* TODO List:
 * - Verify interface consistency: i.e., public functions that take a size
 *   parameter expect size to be in bytes.
 * - Convenience functions for reading a block of data from a given offset.
 */

#ifndef __KERNEL__
#include <string.h>
#endif

#include "csr1212.h"

/* Permitted key type for each key id */
#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
static const u_int8_t csr1212_key_id_type_map[0x30] = {
	0,			/* Reserved */
	__D | __L,		/* Descriptor */
	__I | __D | __L,	/* Bus_Dependent_Info */
	__I | __D | __L,	/* Vendor */
	__I,			/* Hardware_Version */
	0, 0,			/* Reserved */
	__D | __L,		/* Module */
	0, 0, 0, 0,		/* Reserved */
	__I,			/* Node_Capabilities */
	__L,			/* EUI_64 */
	0, 0, 0,		/* Reserved */
	__D,			/* Unit */
	__I,			/* Specifier_ID */
	__I,			/* Version */
	__I | __C | __D | __L,	/* Dependent_Info */
	__L,			/* Unit_Location */
	0,			/* Reserved */
	__I,			/* Model */
	__D,			/* Instance */
	__L,			/* Keyword */
	__D,			/* Feature */
	__L,			/* Extended_ROM */
	__I,			/* Extended_Key_Specifier_ID */
	__I,			/* Extended_Key */
	__I | __C | __D | __L,	/* Extended_Data */
	__L,			/* Modifiable_Descriptor */
	__I,			/* Directory_ID */
	__I,			/* Revision */
};
#undef __I
#undef __C
#undef __D
#undef __L


#define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
#define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))

static inline void free_keyval(struct csr1212_keyval *kv)
{
	if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
	    (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
		CSR1212_FREE(kv->value.leaf.data);

	CSR1212_FREE(kv);
}

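/* Descriptive note: csr1212_crc16() computes the 16-bit Configuration ROM CRC
 * from IEEE 1212.  Each quadlet is taken in bus (big-endian) order and folded
 * into the CRC four bits at a time; the result is returned already converted
 * back to big-endian so it can be stored directly in the crc field of a
 * csr1212_keyval_img (see csr1212_fill_cache() below). */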
static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
{
	int shift;
	u_int32_t data;
	u_int16_t sum, crc = 0;

	for (; length; length--) {
		data = CSR1212_BE32_TO_CPU(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return CSR1212_CPU_TO_BE16(crc);
}

#if 0
/* Microsoft computes the CRC with the bytes in reverse order.  Therefore we
 * have a special version of the CRC algorithm to account for their buggy
 * software. */
static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
{
	int shift;
	u_int32_t data;
	u_int16_t sum, crc = 0;

	for (; length; length--) {
		data = CSR1212_LE32_TO_CPU(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return CSR1212_CPU_TO_BE16(crc);
}
#endif

static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
							 struct csr1212_keyval *kv)
{
	struct csr1212_dentry *pos;

	for (pos = dir->value.directory.dentries_head;
	     pos != NULL; pos = pos->next) {
		if (pos->kv == kv)
			return pos;
	}

	return NULL;
}

static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
								u_int32_t offset)
{
	struct csr1212_keyval *kv;

	for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
		if (kv->offset == offset)
			return kv;
	}

	return NULL;
}

/* Creation Routines */

struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
				       size_t bus_info_size, void *private)
{
	struct csr1212_csr *csr;

	csr = CSR1212_MALLOC(sizeof(*csr));
	if (!csr)
		return NULL;

	csr->cache_head =
		csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
					 CSR1212_CONFIG_ROM_SPACE_SIZE);
	if (!csr->cache_head) {
		CSR1212_FREE(csr);
		return NULL;
	}

	/* The keyval key id is not used for the root node, but a valid key id
	 * that can be used for a directory needs to be passed to
	 * csr1212_new_directory(). */
	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
	if (!csr->root_kv) {
		CSR1212_FREE(csr->cache_head);
		CSR1212_FREE(csr);
		return NULL;
	}

	csr->bus_info_data = csr->cache_head->data;
	csr->bus_info_len = bus_info_size;
	csr->crc_len = bus_info_size;
	csr->ops = ops;
	csr->private = private;
	csr->cache_tail = csr->cache_head;

	return csr;
}

void csr1212_init_local_csr(struct csr1212_csr *csr,
			    const u_int32_t *bus_info_data, int max_rom)
{
	static const int mr_map[] = { 4, 64, 1024, 0 };

#ifdef __KERNEL__
	BUG_ON(max_rom & ~0x3);
	csr->max_rom = mr_map[max_rom];
#else
	if (max_rom & ~0x3)	/* caller supplied invalid argument */
		csr->max_rom = 0;
	else
		csr->max_rom = mr_map[max_rom];
#endif
	memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
}

static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
{
	struct csr1212_keyval *kv;

	if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
		return NULL;

	kv = CSR1212_MALLOC(sizeof(*kv));
	if (!kv)
		return NULL;

	kv->key.type = type;
	kv->key.id = key;

	kv->associate = NULL;
	kv->refcnt = 1;

	kv->next = NULL;
	kv->prev = NULL;
	kv->offset = 0;
	kv->valid = 0;
	return kv;
}

struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
{
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);

	if (!kv)
		return NULL;

	kv->value.immediate = value;
	kv->valid = 1;
	return kv;
}

struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
{
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);

	if (!kv)
		return NULL;

	if (data_len > 0) {
		kv->value.leaf.data = CSR1212_MALLOC(data_len);
		if (!kv->value.leaf.data) {
			CSR1212_FREE(kv);
			return NULL;
		}

		if (data)
			memcpy(kv->value.leaf.data, data, data_len);
	} else {
		kv->value.leaf.data = NULL;
	}

	kv->value.leaf.len = bytes_to_quads(data_len);
	kv->offset = 0;
	kv->valid = 1;

	return kv;
}

struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
{
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);

	if (!kv)
		return NULL;

	kv->value.csr_offset = csr_offset;

	kv->offset = 0;
	kv->valid = 1;
	return kv;
}

struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
{
	struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);

	if (!kv)
		return NULL;

	kv->value.directory.len = 0;
	kv->offset = 0;
	kv->value.directory.dentries_head = NULL;
	kv->value.directory.dentries_tail = NULL;
	kv->valid = 1;
	return kv;
}

int csr1212_associate_keyval(struct csr1212_keyval *kv,
			     struct csr1212_keyval *associate)
{
	if (!kv || !associate)
		return CSR1212_EINVAL;

	if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
	    (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
	     associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
	     associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
	     associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
	     associate->key.id < 0x30))
		return CSR1212_EINVAL;

	if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
	    associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
		return CSR1212_EINVAL;

	if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
	    associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
		return CSR1212_EINVAL;

	if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
	    kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
		return CSR1212_EINVAL;

	if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
	    kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
		return CSR1212_EINVAL;

	if (kv->associate)
		csr1212_release_keyval(kv->associate);

	associate->refcnt++;
	kv->associate = associate;

	return CSR1212_SUCCESS;
}

int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
				       struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
		return CSR1212_EINVAL;

	dentry = CSR1212_MALLOC(sizeof(*dentry));
	if (!dentry)
		return CSR1212_ENOMEM;

	dentry->kv = kv;

	kv->refcnt++;

	dentry->next = NULL;
	dentry->prev = dir->value.directory.dentries_tail;

	if (!dir->value.directory.dentries_head)
		dir->value.directory.dentries_head = dentry;

	if (dir->value.directory.dentries_tail)
		dir->value.directory.dentries_tail->next = dentry;

	dir->value.directory.dentries_tail = dentry;

	return CSR1212_SUCCESS;
}

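/* Illustrative sketch (not compiled): how a host driver might combine the
 * creation routines above to publish a vendor entry with a textual
 * descriptor.  The ops table, the vendor id 0x123456, the "XYZ Board" string
 * and the 5-quadlet bus info size are placeholders, and most error handling
 * is omitted for brevity. */
#if 0
static int example_build_rom(struct csr1212_bus_ops *ops, void *host)
{
	struct csr1212_csr *csr;
	struct csr1212_keyval *vend_id, *text;

	csr = csr1212_create_csr(ops, 5 * sizeof(u_int32_t), host);
	if (!csr)
		return CSR1212_ENOMEM;

	vend_id = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, 0x123456);
	text = csr1212_new_string_descriptor_leaf("XYZ Board");

	/* The descriptor leaf follows the vendor entry in the ROM image. */
	csr1212_associate_keyval(vend_id, text);
	csr1212_attach_keyval_to_directory(csr->root_kv, vend_id);

	/* Drop the local references; the root directory now owns them. */
	csr1212_release_keyval(vend_id);
	csr1212_release_keyval(text);

	return csr1212_generate_csr_image(csr);
}
#endif
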
struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
						      u_int32_t value)
{
	struct csr1212_keyval *kvs, *kvk, *kvv;

	kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
	kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
	kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);

	if (!kvs || !kvk || !kvv) {
		if (kvs)
			free_keyval(kvs);
		if (kvk)
			free_keyval(kvk);
		if (kvv)
			free_keyval(kvv);
		return NULL;
	}

	/* Don't keep a local reference to the extended key or value. */
	kvk->refcnt = 0;
	kvv->refcnt = 0;

	csr1212_associate_keyval(kvk, kvv);
	csr1212_associate_keyval(kvs, kvk);

	return kvs;
}

struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
						 const void *data, size_t data_len)
{
	struct csr1212_keyval *kvs, *kvk, *kvv;

	kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
	kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
	kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);

	if (!kvs || !kvk || !kvv) {
		if (kvs)
			free_keyval(kvs);
		if (kvk)
			free_keyval(kvk);
		if (kvv)
			free_keyval(kvv);
		return NULL;
	}

	/* Don't keep a local reference to the extended key or value. */
	kvk->refcnt = 0;
	kvv->refcnt = 0;

	csr1212_associate_keyval(kvk, kvv);
	csr1212_associate_keyval(kvs, kvk);

	return kvs;
}

struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
						   const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
			      data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);

	if (data) {
		memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
	}

	return kv;
}

struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
							   u_int16_t cset,
							   u_int16_t language,
							   const void *data,
							   size_t data_len)
{
	struct csr1212_keyval *kv;
	char *lstr;

	kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
					 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
	CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);

	lstr = (char *)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);

	/* make sure last quadlet is zeroed out */
	*((u_int32_t *)&(lstr[(data_len - 1) & ~0x3])) = 0;

	/* don't copy the NUL terminator */
	memcpy(lstr, data, data_len);

	return kv;
}

static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
		0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
		0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
		0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
		0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
		0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
		0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
		0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
		0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
		0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
		0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
	};

	for (; *s; s++) {
		if (minimal_ascii_table[*s & 0x7F] != *s)
			return -1; /* failed */
	}

	/* String conforms to minimal-ascii, as specified by IEEE 1212,
	 * par. 7.4 */
	return 0;
}

struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	/* Check if the string conforms to the minimal-ascii format */
	if (csr1212_check_minimal_ascii(s))
		return NULL;

	/* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
	return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
}

struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
							u_int8_t palette_depth,
							u_int8_t color_space,
							u_int16_t language,
							u_int16_t hscan,
							u_int16_t vscan,
							u_int32_t *palette,
							u_int32_t *pixels)
{
	static const int pd[4] = { 0, 4, 16, 256 };
	static const int cs[16] = { 4, 2 };
	struct csr1212_keyval *kv;
	int palette_size;
	int pixel_size = (hscan * vscan + 3) & ~0x3;

	if (!pixels || (!palette && palette_depth) ||
	    (palette_depth & ~0x3) || (color_space & ~0xf))
		return NULL;

	palette_size = pd[palette_depth] * cs[color_space];

	kv = csr1212_new_descriptor_leaf(1, 0, NULL,
					 palette_size + pixel_size +
					 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
	CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
	CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
	CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
	CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
	CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);

	if (palette_size)
		memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
		       palette_size);

	memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);

	return kv;
}

struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
							      u_int64_t address)
{
	struct csr1212_keyval *kv;

	/* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
	kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
	if (!kv)
		return NULL;

	CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
	CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
	CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);

	return kv;
}

static int csr1212_check_keyword(const char *s)
{
	for (; *s; s++) {

		if (('A' <= *s) && (*s <= 'Z'))
			continue;
		if (('0' <= *s) && (*s <= '9'))
			continue;
		if (*s == '-')
			continue;

		return -1; /* failed */
	}

	/* String conforms to keyword, as specified by IEEE 1212,
	 * par. 7.6.5 */
	return CSR1212_SUCCESS;
}

struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
{
	struct csr1212_keyval *kv;
	char *buffer;
	int i, data_len = 0;

	/* Check all keywords to see if they conform to restrictions:
	 * Only the following characters are allowed: ['A'..'Z', '0'..'9', '-'].
	 * Each word is zero-terminated.
	 * Also calculate the total length of the keywords.
	 */
	for (i = 0; i < strc; i++) {
		if (!strv[i] || csr1212_check_keyword(strv[i])) {
			return NULL;
		}
		data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
	}

	/* IEEE 1212, par. 7.6.5 Keyword leaves */
	kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
	if (!kv)
		return NULL;

	buffer = (char *)kv->value.leaf.data;

	/* make sure last quadlet is zeroed out */
	*((u_int32_t *)&(buffer[(data_len - 1) & ~0x3])) = 0;

	/* Copy keyword(s) into leaf data buffer */
	for (i = 0; i < strc; i++) {
		int len = strlen(strv[i]) + 1;
		memcpy(buffer, strv[i], len);
		buffer += len;
	}
	return kv;
}

/* Destruction Routines */

void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
					  struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
		return;

	dentry = csr1212_find_keyval(dir, kv);

	if (!dentry)
		return;

	if (dentry->prev)
		dentry->prev->next = dentry->next;
	if (dentry->next)
		dentry->next->prev = dentry->prev;
	if (dir->value.directory.dentries_head == dentry)
		dir->value.directory.dentries_head = dentry->next;
	if (dir->value.directory.dentries_tail == dentry)
		dir->value.directory.dentries_tail = dentry->prev;

	CSR1212_FREE(dentry);

	csr1212_release_keyval(kv);
}

void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
{
	if (kv->associate) {
		csr1212_release_keyval(kv->associate);
	}

	kv->associate = NULL;
}

/* This function is used to free the memory taken by a keyval.  If the given
 * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if their respective refcnts are 0.  By means of
 * list manipulation, this routine will descend a directory structure in a
 * non-recursive manner. */
void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *k, *a;
	struct csr1212_dentry dentry;
	struct csr1212_dentry *head, *tail;

	dentry.kv = kv;
	dentry.next = NULL;
	dentry.prev = NULL;

	head = &dentry;
	tail = head;

	while (head) {
		k = head->kv;

		while (k) {
			k->refcnt--;

			if (k->refcnt > 0)
				break;

			a = k->associate;

			if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
				/* If the current entry is a directory, then move all
				 * the entries to the destruction list. */
				if (k->value.directory.dentries_head) {
					tail->next = k->value.directory.dentries_head;
					k->value.directory.dentries_head->prev = tail;
					tail = k->value.directory.dentries_tail;
				}
			}
			free_keyval(k);
			k = a;
		}

		head = head->next;
		if (head) {
			if (head->prev && head->prev != &dentry) {
				CSR1212_FREE(head->prev);
			}
			head->prev = NULL;
		} else if (tail != &dentry)
			CSR1212_FREE(tail);
	}
}

void csr1212_destroy_csr(struct csr1212_csr *csr)
{
	struct csr1212_csr_rom_cache *c, *oc;
	struct csr1212_cache_region *cr, *ocr;

	csr1212_release_keyval(csr->root_kv);

	c = csr->cache_head;
	while (c) {
		oc = c;
		cr = c->filled_head;
		while (cr) {
			ocr = cr;
			cr = cr->next;
			CSR1212_FREE(ocr);
		}
		c = c->next;
		CSR1212_FREE(oc);
	}

	CSR1212_FREE(csr);
}

/* CSR Image Creation */

static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
{
	struct csr1212_csr_rom_cache *cache;
	u_int64_t csr_addr;

	if (!csr || !csr->ops || !csr->ops->allocate_addr_range ||
	    !csr->ops->release_addr || csr->max_rom < 1)
		return CSR1212_EINVAL;

	/* ROM size must be a multiple of csr->max_rom */
	romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);

	csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
	if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
		return CSR1212_ENOMEM;
	}
	if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
		/* Invalid address returned from allocate_addr_range(). */
		csr->ops->release_addr(csr_addr, csr->private);
		return CSR1212_ENOMEM;
	}

	cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
	if (!cache) {
		csr->ops->release_addr(csr_addr, csr->private);
		return CSR1212_ENOMEM;
	}

	cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
	if (!cache->ext_rom) {
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return CSR1212_ENOMEM;
	}

	if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
		csr1212_release_keyval(cache->ext_rom);
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return CSR1212_ENOMEM;
	}
	cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
	cache->ext_rom->value.leaf.len = -1;
	cache->ext_rom->value.leaf.data = cache->data;

	/* Add cache to tail of cache list */
	cache->prev = csr->cache_tail;
	csr->cache_tail->next = cache;
	csr->cache_tail = cache;
	return CSR1212_SUCCESS;
}

static inline void csr1212_remove_cache(struct csr1212_csr *csr,
					struct csr1212_csr_rom_cache *cache)
{
	if (csr->cache_head == cache)
		csr->cache_head = cache->next;
	if (csr->cache_tail == cache)
		csr->cache_tail = cache->prev;

	if (cache->prev)
		cache->prev->next = cache->next;
	if (cache->next)
		cache->next->prev = cache->prev;

	if (cache->ext_rom) {
		csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
		csr1212_release_keyval(cache->ext_rom);
	}

	CSR1212_FREE(cache);
}

static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
					  struct csr1212_keyval **layout_tail)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *dkv;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int num_entries = 0;

	for (dentry = dir->value.directory.dentries_head; dentry;
	     dentry = dentry->next) {
		for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
			/* Special Case: Extended Key Specifier_ID */
			if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL) {
					last_extkey_spec = dkv;
				} else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
					last_extkey_spec = dkv;
				} else {
					continue;
				}
			/* Special Case: Extended Key */
			} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL) {
					last_extkey = dkv;
				} else if (dkv->value.immediate != last_extkey->value.immediate) {
					last_extkey = dkv;
				} else {
					continue;
				}
			}

			num_entries += 1;

			switch (dkv->key.type) {
			default:
			case CSR1212_KV_TYPE_IMMEDIATE:
			case CSR1212_KV_TYPE_CSR_OFFSET:
				break;
			case CSR1212_KV_TYPE_LEAF:
			case CSR1212_KV_TYPE_DIRECTORY:
				/* Remove from list */
				if (dkv->prev && (dkv->prev->next == dkv))
					dkv->prev->next = dkv->next;
				if (dkv->next && (dkv->next->prev == dkv))
					dkv->next->prev = dkv->prev;
				//if (dkv == *layout_tail)
				//	*layout_tail = dkv->prev;

				/* Special case: Extended ROM leafs */
				if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
					dkv->value.leaf.len = -1;
					/* Don't add Extended ROM leafs in the layout list,
					 * they are handled differently. */
					break;
				}

				/* Add to tail of list */
				dkv->next = NULL;
				dkv->prev = *layout_tail;
				(*layout_tail)->next = dkv;
				*layout_tail = dkv;
				break;
			}
		}
	}
	return num_entries;
}

size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *ltail = kv;
	size_t agg_size = 0;

	while (kv) {
		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.leaf.len + 1;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.directory.len + 1;
			break;
		}
		kv = kv->next;
	}
	return quads_to_bytes(agg_size);
}

struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
						  struct csr1212_keyval *start_kv,
						  int start_pos)
{
	struct csr1212_keyval *kv = start_kv;
	struct csr1212_keyval *okv = start_kv;
	int pos = start_pos;
	int kv_len = 0, okv_len = 0;

	cache->layout_head = kv;

	while (kv && pos < cache->size) {
		/* Special case: Extended ROM leafs */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			kv->offset = cache->offset + pos;
		}

		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			kv_len = kv->value.leaf.len;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv_len = kv->value.directory.len;
			break;

		default:
			/* Should never get here */
			break;
		}

		pos += quads_to_bytes(kv_len + 1);

		if (pos <= cache->size) {
			okv = kv;
			okv_len = kv_len;
			kv = kv->next;
		}
	}

	cache->layout_tail = okv;
	cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);

	return kv;
}

static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
					 u_int32_t *data_buffer)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int index = 0;

	for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
		struct csr1212_keyval *a;

		for (a = dentry->kv; a; a = a->associate) {
			u_int32_t value = 0;

			/* Special Case: Extended Key Specifier_ID */
			if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL) {
					last_extkey_spec = a;
				} else if (a->value.immediate != last_extkey_spec->value.immediate) {
					last_extkey_spec = a;
				} else {
					continue;
				}
			/* Special Case: Extended Key */
			} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL) {
					last_extkey = a;
				} else if (a->value.immediate != last_extkey->value.immediate) {
					last_extkey = a;
				} else {
					continue;
				}
			}

			switch (a->key.type) {
			case CSR1212_KV_TYPE_IMMEDIATE:
				value = a->value.immediate;
				break;
			case CSR1212_KV_TYPE_CSR_OFFSET:
				value = a->value.csr_offset;
				break;
			case CSR1212_KV_TYPE_LEAF:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1 + index);
				value = bytes_to_quads(value);
				break;
			case CSR1212_KV_TYPE_DIRECTORY:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1 + index);
				value = bytes_to_quads(value);
				break;
			default:
				/* Should never get here */
				break; /* GDB breakpoint */
			}

			value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
			value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
				 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
			data_buffer[index] = CSR1212_CPU_TO_BE32(value);
			index++;
		}
	}
}

void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval *kv, *nkv;
	struct csr1212_keyval_img *kvi;

	for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
		kvi = (struct csr1212_keyval_img *)
			(cache->data + bytes_to_quads(kv->offset - cache->offset));
		switch (kv->key.type) {
		default:
		case CSR1212_KV_TYPE_IMMEDIATE:
		case CSR1212_KV_TYPE_CSR_OFFSET:
			/* Should never get here */
			break; /* GDB breakpoint */

		case CSR1212_KV_TYPE_LEAF:
			/* Don't copy over Extended ROM areas, they are
			 * already filled out! */
			if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
				memcpy(kvi->data, kv->value.leaf.data,
				       quads_to_bytes(kv->value.leaf.len));

			kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			csr1212_generate_tree_subdir(kv, kvi->data);

			kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
			break;
		}

		nkv = kv->next;
		if (kv->prev)
			kv->prev->next = NULL;
		if (kv->next)
			kv->next->prev = NULL;
		kv->prev = NULL;
		kv->next = NULL;
	}
}

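/* Descriptive note: csr1212_generate_csr_image() below drives the three
 * helpers above.  csr1212_generate_layout_order() chains the keyvals into a
 * layout list and computes their sizes in quadlets,
 * csr1212_generate_positions() assigns each keyval an offset within a ROM
 * cache, and csr1212_fill_cache() serializes the entries, together with their
 * length and CRC fields, into the cache's data buffer. */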
int csr1212_generate_csr_image(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_csr_rom_cache *cache;
	struct csr1212_keyval *kv;
	size_t agg_size;
	int ret;
	int init_offset;

	if (!csr)
		return CSR1212_EINVAL;

	cache = csr->cache_head;

	bi = (struct csr1212_bus_info_block_img *)cache->data;

	bi->length = bytes_to_quads(csr->bus_info_len) - 1;
	bi->crc_length = bi->length;
	bi->crc = csr1212_crc16(bi->data, bi->crc_length);

	csr->root_kv->next = NULL;
	csr->root_kv->prev = NULL;

	agg_size = csr1212_generate_layout_order(csr->root_kv);

	init_offset = csr->bus_info_len;

	for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
		if (!cache) {
			/* Estimate approximate number of additional cache
			 * regions needed (it assumes that the cache holding
			 * the first 1K Config ROM space always exists). */
			int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
						(2 * sizeof(u_int32_t))) + 1;

			/* Add additional cache regions, extras will be
			 * removed later */
			for (; est_c; est_c--) {
				ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
				if (ret != CSR1212_SUCCESS)
					return ret;
			}
			/* Need to re-layout for additional cache regions */
			agg_size = csr1212_generate_layout_order(csr->root_kv);
			kv = csr->root_kv;
			cache = csr->cache_head;
			init_offset = csr->bus_info_len;
		}
		kv = csr1212_generate_positions(cache, kv, init_offset);
		agg_size -= cache->len;
		init_offset = sizeof(u_int32_t);
	}

	/* Remove unused, excess cache regions */
	while (cache) {
		struct csr1212_csr_rom_cache *oc = cache;

		cache = cache->next;
		csr1212_remove_cache(csr, oc);
	}

	/* Go through the list backward so that when done, the correct CRC
	 * will be calculated for the Extended ROM areas. */
	for (cache = csr->cache_tail; cache; cache = cache->prev) {
		/* Only Extended ROM caches should have this set. */
		if (cache->ext_rom) {
			int leaf_size;

			/* Make sure the Extended ROM leaf is a multiple of
			 * max_rom in size. */
			if (csr->max_rom < 1)
				return CSR1212_EINVAL;
			leaf_size = (cache->len + (csr->max_rom - 1)) &
				~(csr->max_rom - 1);

			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       leaf_size - cache->len);

			/* Subtract leaf header */
			leaf_size -= sizeof(u_int32_t);

			/* Update the Extended ROM leaf length */
			cache->ext_rom->value.leaf.len =
				bytes_to_quads(leaf_size);
		} else {
			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       cache->size - cache->len);
		}

		/* Copy the data into the cache buffer */
		csr1212_fill_cache(cache);

		if (cache != csr->cache_head) {
			/* Set the length and CRC of the extended ROM. */
			struct csr1212_keyval_img *kvi =
				(struct csr1212_keyval_img *)cache->data;

			kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1);
			kvi->crc = csr1212_crc16(kvi->data,
						 bytes_to_quads(cache->len) - 1);
		}
	}

	return CSR1212_SUCCESS;
}

int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
{
	struct csr1212_csr_rom_cache *cache;

	for (cache = csr->cache_head; cache; cache = cache->next) {
		if (offset >= cache->offset &&
		    (offset + len) <= (cache->offset + cache->size)) {
			memcpy(buffer,
			       &cache->data[bytes_to_quads(offset - cache->offset)],
			       len);
			return CSR1212_SUCCESS;
		}
	}
	return CSR1212_ENOENT;
}

/* Parse a chunk of data as a Config ROM */

static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_cache_region *cr;
	int i;
	int ret;

	/* IEEE 1212 says that the entire bus info block should be readable in
	 * a single transaction regardless of the max_rom value.
	 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
	 * bus info block will be read 1 quadlet at a time.  The rest of the
	 * ConfigROM will be read according to the max_rom field. */
	for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
					 sizeof(csr1212_quad_t),
					 &csr->cache_head->data[bytes_to_quads(i)],
					 csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;
	}

	bi = (struct csr1212_bus_info_block_img *)csr->cache_head->data;
	csr->crc_len = quads_to_bytes(bi->crc_length);

	/* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
	 * is not always the case, so read the rest of the crc area 1 quadlet at
	 * a time. */
	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
					 sizeof(csr1212_quad_t),
					 &csr->cache_head->data[bytes_to_quads(i)],
					 csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;
	}

	if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
		return CSR1212_EINVAL;

#if 0
	/* Apparently there are so many different wrong implementations of the
	 * CRC algorithm that verifying them is moot. */
	if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
	    (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
		return CSR1212_EINVAL;
#endif

	cr = CSR1212_MALLOC(sizeof(*cr));
	if (!cr)
		return CSR1212_ENOMEM;

	cr->next = NULL;
	cr->prev = NULL;
	cr->offset_start = 0;
	cr->offset_end = csr->crc_len + 4;

	csr->cache_head->filled_head = cr;
	csr->cache_head->filled_tail = cr;

	return CSR1212_SUCCESS;
}

static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
				   csr1212_quad_t ki,
				   u_int32_t kv_pos)
{
	int ret = CSR1212_SUCCESS;
	struct csr1212_keyval *k = NULL;
	u_int32_t offset;

	switch (CSR1212_KV_KEY_TYPE(ki)) {
	case CSR1212_KV_TYPE_IMMEDIATE:
		k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
					  CSR1212_KV_VAL(ki));
		if (!k) {
			ret = CSR1212_ENOMEM;
			goto fail;
		}

		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		break;

	case CSR1212_KV_TYPE_CSR_OFFSET:
		k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
					   CSR1212_KV_VAL(ki));
		if (!k) {
			ret = CSR1212_ENOMEM;
			goto fail;
		}
		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		break;

	default:
		/* Compute the offset from 0xffff f000 0000. */
		offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
		if (offset == kv_pos) {
			/* Uh-oh.  Can't have a relative offset of 0 for Leaves
			 * or Directories.  The Config ROM image is most likely
			 * messed up, so we'll just abort here. */
			ret = CSR1212_EIO;
			goto fail;
		}

		k = csr1212_find_keyval_offset(dir, offset);

		if (k)
			break;		/* Found it. */

		if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
			k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
		} else {
			k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
		}
		if (!k) {
			ret = CSR1212_ENOMEM;
			goto fail;
		}
		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		k->valid = 0;	/* Contents not read yet so it's not valid. */
		k->offset = offset;

		k->prev = dir;
		k->next = dir->next;
		dir->next->prev = k;
		dir->next = k;
	}

	ret = csr1212_attach_keyval_to_directory(dir, k);

fail:
	if (ret != CSR1212_SUCCESS) {
		if (k)
			free_keyval(k);
	}
	return ret;
}

int csr1212_parse_keyval(struct csr1212_keyval *kv,
			 struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval_img *kvi;
	int i;
	int ret = CSR1212_SUCCESS;
	int kvi_len;

	kvi = (struct csr1212_keyval_img *)&cache->data[bytes_to_quads(kv->offset -
									cache->offset)];
	kvi_len = CSR1212_BE16_TO_CPU(kvi->length);

#if 0
	/* Apparently there are so many different wrong implementations of the
	 * CRC algorithm that verifying them is moot. */
	if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
	    (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
		ret = CSR1212_EINVAL;
		goto fail;
	}
#endif

	switch (kv->key.type) {
	case CSR1212_KV_TYPE_DIRECTORY:
		for (i = 0; i < kvi_len; i++) {
			csr1212_quad_t ki = kvi->data[i];

			/* Some devices put null entries in their unit
			 * directories.  If we come across such an entry,
			 * then skip it. */
			if (ki == 0x0)
				continue;
			ret = csr1212_parse_dir_entry(kv, ki,
						      (kv->offset +
						       quads_to_bytes(i + 1)));
		}
		kv->value.directory.len = kvi_len;
		break;

	case CSR1212_KV_TYPE_LEAF:
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
			if (!kv->value.leaf.data) {
				ret = CSR1212_ENOMEM;
				goto fail;
			}

			kv->value.leaf.len = kvi_len;
			memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
		}
		break;
	}

	kv->valid = 1;

fail:
	return ret;
}

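/* Descriptive note: _csr1212_read_keyval() brings a keyval's on-bus image
 * into the appropriate ROM cache.  It reads from the bus in max_rom-sized
 * chunks, records which byte ranges of the cache are already filled in the
 * filled_head/filled_tail region list (consolidating adjacent regions as it
 * goes), and finally hands the cached data to csr1212_parse_keyval(). */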
int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
	struct csr1212_keyval_img *kvi = NULL;
	struct csr1212_csr_rom_cache *cache;
	int cache_index;
	u_int64_t addr;
	u_int32_t *cache_ptr;
	u_int16_t kv_len = 0;

	if (!csr || !kv || csr->max_rom < 1)
		return CSR1212_EINVAL;

	/* First find which cache the data should be in (or go in if not read
	 * yet). */
	for (cache = csr->cache_head; cache; cache = cache->next) {
		if (kv->offset >= cache->offset &&
		    kv->offset < (cache->offset + cache->size))
			break;
	}

	if (!cache) {
		csr1212_quad_t q;
		u_int32_t cache_size;

		/* Only create a new cache for Extended ROM leaves. */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			return CSR1212_EINVAL;

		if (csr->ops->bus_read(csr,
				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
				       sizeof(csr1212_quad_t), &q, csr->private)) {
			return CSR1212_EIO;
		}

		kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16;

		cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
			      (csr->max_rom - 1)) & ~(csr->max_rom - 1);

		cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
		if (!cache)
			return CSR1212_ENOMEM;

		kv->value.leaf.data = &cache->data[1];
		csr->cache_tail->next = cache;
		cache->prev = csr->cache_tail;
		cache->next = NULL;
		csr->cache_tail = cache;
		cache->filled_head =
			CSR1212_MALLOC(sizeof(*cache->filled_head));
		if (!cache->filled_head) {
			return CSR1212_ENOMEM;
		}

		cache->filled_head->offset_start = 0;
		cache->filled_head->offset_end = sizeof(csr1212_quad_t);
		cache->filled_tail = cache->filled_head;
		cache->filled_head->next = NULL;
		cache->filled_head->prev = NULL;
		cache->data[0] = q;

		/* Don't read the entire extended ROM now.  Pieces of it will
		 * be read when entries inside it are read. */
		return csr1212_parse_keyval(kv, cache);
	}

	cache_index = kv->offset - cache->offset;

	/* Now search the already-read portions of the cache to see if it is there. */
	for (cr = cache->filled_head; cr; cr = cr->next) {
		if (cache_index < cr->offset_start) {
			newcr = CSR1212_MALLOC(sizeof(*newcr));
			if (!newcr)
				return CSR1212_ENOMEM;

			newcr->offset_start = cache_index & ~(csr->max_rom - 1);
			newcr->offset_end = newcr->offset_start;
			newcr->next = cr;
			newcr->prev = cr->prev;
			cr->prev = newcr;
			cr = newcr;
			break;
		} else if ((cache_index >= cr->offset_start) &&
			   (cache_index < cr->offset_end)) {
			kvi = (struct csr1212_keyval_img *)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) + 1);
			break;
		} else if (cache_index == cr->offset_end)
			break;
	}

	if (!cr) {
		cr = cache->filled_tail;
		newcr = CSR1212_MALLOC(sizeof(*newcr));
		if (!newcr)
			return CSR1212_ENOMEM;

		newcr->offset_start = cache_index & ~(csr->max_rom - 1);
		newcr->offset_end = newcr->offset_start;
		newcr->prev = cr;
		newcr->next = cr->next;
		cr->next = newcr;
		cr = newcr;
		cache->filled_tail = newcr;
	}

	while (!kvi || cr->offset_end < cache_index + kv_len) {
		cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
							~(csr->max_rom - 1))];

		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
			cr->offset_end) & ~(csr->max_rom - 1);

		if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
				       csr->private)) {
			if (csr->max_rom == 4)
				/* We've got problems! */
				return CSR1212_EIO;

			/* Apparently the max_rom value was a lie, set it to
			 * do quadlet reads and try again. */
			csr->max_rom = 4;
			continue;
		}

		cr->offset_end += csr->max_rom - (cr->offset_end &
						  (csr->max_rom - 1));

		if (!kvi && (cr->offset_end > cache_index)) {
			kvi = (struct csr1212_keyval_img *)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) + 1);
		}

		if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
			/* The Leaf or Directory claims its length extends
			 * beyond the ConfigROM image region and thus beyond the
			 * end of our cache region.  Therefore, we abort now
			 * rather than seg faulting later. */
			return CSR1212_EIO;
		}

		ncr = cr->next;

		if (ncr && (cr->offset_end >= ncr->offset_start)) {
			/* consolidate region entries */
			ncr->offset_start = cr->offset_start;

			if (cr->prev)
				cr->prev->next = cr->next;
			ncr->prev = cr->prev;
			if (cache->filled_head == cr)
				cache->filled_head = ncr;
			CSR1212_FREE(cr);
			cr = ncr;
		}
	}

	return csr1212_parse_keyval(kv, cache);
}

int csr1212_parse_csr(struct csr1212_csr *csr)
{
	static const int mr_map[] = { 4, 64, 1024, 0 };
	struct csr1212_dentry *dentry;
	int ret;

	if (!csr || !csr->ops || !csr->ops->bus_read)
		return CSR1212_EINVAL;

	ret = csr1212_parse_bus_info_block(csr);
	if (ret != CSR1212_SUCCESS)
		return ret;

	if (!csr->ops->get_max_rom)
		csr->max_rom = mr_map[0];	/* default value */
	else {
		int i = csr->ops->get_max_rom(csr->bus_info_data,
					      csr->private);
		if (i & ~0x3)
			return CSR1212_EINVAL;
		csr->max_rom = mr_map[i];
	}

	csr->cache_head->layout_head = csr->root_kv;
	csr->cache_head->layout_tail = csr->root_kv;

	csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
		csr->bus_info_len;

	csr->root_kv->valid = 0;
	csr->root_kv->next = csr->root_kv;
	csr->root_kv->prev = csr->root_kv;
	ret = _csr1212_read_keyval(csr, csr->root_kv);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/* Scan through the Root directory finding all extended ROM regions
	 * and make cache regions for them */
	for (dentry = csr->root_kv->value.directory.dentries_head;
	     dentry; dentry = dentry->next) {
		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
		    !dentry->kv->valid) {
			ret = _csr1212_read_keyval(csr, dentry->kv);
			if (ret != CSR1212_SUCCESS)
				return ret;
		}
	}

	return CSR1212_SUCCESS;
}
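
/* Illustrative sketch (not compiled): how a caller might parse a remote
 * node's Config ROM with the routines above and walk its root directory.
 * my_bus_ops, bus_info_len and host_private stand in for a real
 * csr1212_bus_ops implementation; only the immediate entries are valid right
 * after parsing, while leaves and subdirectories are filled in lazily via
 * _csr1212_read_keyval() when their contents are needed. */
#if 0
static void example_dump_root_dir(struct csr1212_bus_ops *my_bus_ops,
				  size_t bus_info_len, void *host_private)
{
	struct csr1212_csr *csr;
	struct csr1212_dentry *dentry;

	csr = csr1212_create_csr(my_bus_ops, bus_info_len, host_private);
	if (!csr)
		return;

	if (csr1212_parse_csr(csr) == CSR1212_SUCCESS) {
		for (dentry = csr->root_kv->value.directory.dentries_head;
		     dentry; dentry = dentry->next) {
			/* inspect dentry->kv->key.id / dentry->kv->value here */
		}
	}

	csr1212_destroy_csr(csr);
}
#endif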