libffado/src/libieee1394/csr1212.c
1 /*
2 * This file is part of FFADO
3 * FFADO = Free Firewire (pro-)audio drivers for linux
5 * FFADO is based upon FreeBoB.
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 2 of the License, or
10 * (at your option) version 3 of the License.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
24 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
25 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions are met:
30 * 1. Redistributions of source code must retain the above copyright notice,
31 * this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 * 3. The name of the author may not be used to endorse or promote products
36 * derived from this software without specific prior written permission.
38 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
39 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
40 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
41 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
43 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
44 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
45 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
46 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
47 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 /* TODO List:
52 * - Verify interface consistency: i.e., public functions that take a size
53 * parameter expect size to be in bytes.
54 * - Convenience functions for reading a block of data from a given offset.
57 #ifndef __KERNEL__
58 #include <string.h>
59 #endif
61 #include "csr1212.h"
64 /* Permitted key type for each key id */
65 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
66 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
67 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
68 #define __L (1 << CSR1212_KV_TYPE_LEAF)
69 static const u_int8_t csr1212_key_id_type_map[0x30] = {
70 0, /* Reserved */
71 __D | __L, /* Descriptor */
72 __I | __D | __L, /* Bus_Dependent_Info */
73 __I | __D | __L, /* Vendor */
74 __I, /* Hardware_Version */
75 0, 0, /* Reserved */
76 __D | __L, /* Module */
77 0, 0, 0, 0, /* Reserved */
78 __I, /* Node_Capabilities */
79 __L, /* EUI_64 */
80 0, 0, 0, /* Reserved */
81 __D, /* Unit */
82 __I, /* Specifier_ID */
83 __I, /* Version */
84 __I | __C | __D | __L, /* Dependent_Info */
85 __L, /* Unit_Location */
86 0, /* Reserved */
87 __I, /* Model */
88 __D, /* Instance */
89 __L, /* Keyword */
90 __D, /* Feature */
91 __L, /* Extended_ROM */
92 __I, /* Extended_Key_Specifier_ID */
93 __I, /* Extended_Key */
94 __I | __C | __D | __L, /* Extended_Data */
95 __L, /* Modifiable_Descriptor */
96 __I, /* Directory_ID */
97 __I, /* Revision */
99 #undef __I
100 #undef __C
101 #undef __D
102 #undef __L
105 #define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
106 #define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
108 static inline void free_keyval(struct csr1212_keyval *kv)
110 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
111 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
112 CSR1212_FREE(kv->value.leaf.data);
114 CSR1212_FREE(kv);
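/* Compute the CRC-16 used for Configuration ROM blocks (generator
 * x^16 + x^12 + x^5 + 1, as used by IEEE 1212/IEEE 1394), processing the
 * big-endian quadlets a nibble at a time.  'length' is in quadlets and the
 * result is returned in big-endian byte order. */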
117 static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
119 int shift;
120 u_int32_t data;
121 u_int16_t sum, crc = 0;
123 for (; length; length--) {
124 data = CSR1212_BE32_TO_CPU(*buffer);
125 buffer++;
126 for (shift = 28; shift >= 0; shift -= 4 ) {
127 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
128 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
130 crc &= 0xffff;
133 return CSR1212_CPU_TO_BE16(crc);
136 #if 0
137 /* Microsoft computes the CRC with the bytes in reverse order. Therefore we
138 * have a special version of the CRC algorithm to account for their buggy
139 * software. */
140 static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
142 int shift;
143 u_int32_t data;
144 u_int16_t sum, crc = 0;
146 for (; length; length--) {
147 data = CSR1212_LE32_TO_CPU(*buffer);
148 buffer++;
149 for (shift = 28; shift >= 0; shift -= 4 ) {
150 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
151 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
153 crc &= 0xffff;
156 return CSR1212_CPU_TO_BE16(crc);
158 #endif
160 static inline struct csr1212_dentry *csr1212_find_keyval(struct csr1212_keyval *dir,
161 struct csr1212_keyval *kv)
163 struct csr1212_dentry *pos;
165 for (pos = dir->value.directory.dentries_head;
166 pos != NULL; pos = pos->next) {
167 if (pos->kv == kv)
168 return pos;
170 return NULL;
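/* Walk the circular keyval list starting after 'kv_list' and return the
 * keyval already known at ROM byte offset 'offset', or NULL if none. */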
174 static inline struct csr1212_keyval *csr1212_find_keyval_offset(struct csr1212_keyval *kv_list,
175 u_int32_t offset)
177 struct csr1212_keyval *kv;
179 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
180 if (kv->offset == offset)
181 return kv;
183 return NULL;
187 /* Creation Routines */
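/* Allocate a csr1212_csr, a cache covering the 1 kB Config ROM space and an
 * (initially empty) root directory keyval.  'bus_info_size' is in bytes and
 * the bus info block occupies the start of the first cache.
 *
 * A rough usage sketch for building a local ROM with this API (illustrative
 * only; error handling omitted, and 'my_ops'/'BUS_INFO_QUADS' are
 * placeholders supplied by the caller):
 *
 *   struct csr1212_csr *csr;
 *   struct csr1212_keyval *vendor;
 *
 *   csr = csr1212_create_csr(&my_ops, quads_to_bytes(BUS_INFO_QUADS), NULL);
 *   vendor = csr1212_new_immediate(CSR1212_KV_ID_VENDOR, 0x123456);
 *   csr1212_attach_keyval_to_directory(csr->root_kv, vendor);
 *   csr1212_release_keyval(vendor);    (the directory now holds a reference)
 *   csr1212_generate_csr_image(csr);   (serialize the tree into the caches)
 */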
188 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
189 size_t bus_info_size, void *private_data)
191 struct csr1212_csr *csr;
193 csr = CSR1212_MALLOC(sizeof(*csr));
194 if (!csr)
195 return NULL;
197 csr->cache_head =
198 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
199 CSR1212_CONFIG_ROM_SPACE_SIZE);
200 if (!csr->cache_head) {
201 CSR1212_FREE(csr);
202 return NULL;
205 /* The keyval key id is not used for the root node, but a valid key id
206 * that can be used for a directory needs to be passed to
207 * csr1212_new_directory(). */
208 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
209 if (!csr->root_kv) {
210 CSR1212_FREE(csr->cache_head);
211 CSR1212_FREE(csr);
212 return NULL;
215 csr->bus_info_data = csr->cache_head->data;
216 csr->bus_info_len = bus_info_size;
217 csr->crc_len = bus_info_size;
218 csr->ops = ops;
219 csr->private_data = private_data;
220 csr->cache_tail = csr->cache_head;
222 return csr;
227 void csr1212_init_local_csr(struct csr1212_csr *csr,
228 const u_int32_t *bus_info_data, int max_rom)
230 static const int mr_map[] = { 4, 64, 1024, 0 };
232 #ifdef __KERNEL__
233 BUG_ON(max_rom & ~0x3);
234 csr->max_rom = mr_map[max_rom];
235 #else
236 if (max_rom & ~0x3) /* caller supplied invalid argument */
237 csr->max_rom = 0;
238 else
239 csr->max_rom = mr_map[max_rom];
240 #endif
241 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
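/* Common allocator for keyvals.  For key ids below 0x30 the requested type
 * is checked against csr1212_key_id_type_map above; ids of 0x30 and above
 * are vendor/unit dependent and accepted with any type.  The returned keyval
 * starts with a refcnt of 1 owned by the caller. */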
245 static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
247 struct csr1212_keyval *kv;
249 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
250 return NULL;
252 kv = CSR1212_MALLOC(sizeof(*kv));
253 if (!kv)
254 return NULL;
256 kv->key.type = type;
257 kv->key.id = key;
259 kv->associate = NULL;
260 kv->refcnt = 1;
262 kv->next = NULL;
263 kv->prev = NULL;
264 kv->offset = 0;
265 kv->valid = 0;
266 return kv;
269 struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
271 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
273 if (!kv)
274 return NULL;
276 kv->value.immediate = value;
277 kv->valid = 1;
278 return kv;
281 struct csr1212_keyval *csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
283 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
285 if (!kv)
286 return NULL;
288 if (data_len > 0) {
289 kv->value.leaf.data = CSR1212_MALLOC(data_len);
290 if (!kv->value.leaf.data) {
291 CSR1212_FREE(kv);
292 return NULL;
295 if (data)
296 memcpy(kv->value.leaf.data, data, data_len);
297 } else {
298 kv->value.leaf.data = NULL;
301 kv->value.leaf.len = bytes_to_quads(data_len);
302 kv->offset = 0;
303 kv->valid = 1;
305 return kv;
308 struct csr1212_keyval *csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
310 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
312 if (!kv)
313 return NULL;
315 kv->value.csr_offset = csr_offset;
317 kv->offset = 0;
318 kv->valid = 1;
319 return kv;
322 struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
324 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
326 if (!kv)
327 return NULL;
329 kv->value.directory.len = 0;
330 kv->offset = 0;
331 kv->value.directory.dentries_head = NULL;
332 kv->value.directory.dentries_tail = NULL;
333 kv->valid = 1;
334 return kv;
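/* Chain 'associate' onto 'kv'.  Descriptors may not have associates, and the
 * associate must be a Descriptor, Dependent_Info, Extended_Key, Extended_Data
 * or a vendor-dependent key (id >= 0x30); an Extended_Key_Specifier_ID may
 * only be followed by an Extended_Key, and an Extended_Key only by
 * Extended_Data.  Any previous associate is released and a reference is
 * taken on the new one. */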
337 int csr1212_associate_keyval(struct csr1212_keyval *kv,
338 struct csr1212_keyval *associate)
340 if (!kv || !associate)
341 return CSR1212_EINVAL;
343 if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
344 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
345 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
346 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
347 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
348 associate->key.id < 0x30))
349 return CSR1212_EINVAL;
351 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
352 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
353 return CSR1212_EINVAL;
355 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
356 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
357 return CSR1212_EINVAL;
359 if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
360 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
361 return CSR1212_EINVAL;
363 if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
364 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
365 return CSR1212_EINVAL;
367 if (kv->associate)
368 csr1212_release_keyval(kv->associate);
370 associate->refcnt++;
371 kv->associate = associate;
373 return CSR1212_SUCCESS;
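/* Append 'kv' to the tail of the directory's entry list and take a
 * reference on it.  The caller keeps its own reference. */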
376 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
377 struct csr1212_keyval *kv)
379 struct csr1212_dentry *dentry;
381 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
382 return CSR1212_EINVAL;
384 dentry = CSR1212_MALLOC(sizeof(*dentry));
385 if (!dentry)
386 return CSR1212_ENOMEM;
388 dentry->kv = kv;
390 kv->refcnt++;
392 dentry->next = NULL;
393 dentry->prev = dir->value.directory.dentries_tail;
395 if (!dir->value.directory.dentries_head)
396 dir->value.directory.dentries_head = dentry;
398 if (dir->value.directory.dentries_tail)
399 dir->value.directory.dentries_tail->next = dentry;
400 dir->value.directory.dentries_tail = dentry;
402 return CSR1212_SUCCESS;
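/* Build the Extended_Key_Specifier_ID -> Extended_Key -> Extended_Data
 * associate chain for an extended immediate entry.  Only the returned
 * specifier keyval carries a reference; the key and data keyvals are owned
 * through the chain (their refcnts are zeroed below). */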
405 struct csr1212_keyval *csr1212_new_extended_immediate(u_int32_t spec, u_int32_t key,
406 u_int32_t value)
408 struct csr1212_keyval *kvs, *kvk, *kvv;
410 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
411 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
412 kvv = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_DATA, value);
414 if (!kvs || !kvk || !kvv) {
415 if (kvs)
416 free_keyval(kvs);
417 if (kvk)
418 free_keyval(kvk);
419 if (kvv)
420 free_keyval(kvv);
421 return NULL;
424 /* Don't keep a local reference to the extended key or value. */
425 kvk->refcnt = 0;
426 kvv->refcnt = 0;
428 csr1212_associate_keyval(kvk, kvv);
429 csr1212_associate_keyval(kvs, kvk);
431 return kvs;
434 struct csr1212_keyval *csr1212_new_extended_leaf(u_int32_t spec, u_int32_t key,
435 const void *data, size_t data_len)
437 struct csr1212_keyval *kvs, *kvk, *kvv;
439 kvs = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID, spec);
440 kvk = csr1212_new_immediate(CSR1212_KV_ID_EXTENDED_KEY, key);
441 kvv = csr1212_new_leaf(CSR1212_KV_ID_EXTENDED_DATA, data, data_len);
443 if (!kvs || !kvk || !kvv) {
444 if (kvs)
445 free_keyval(kvs);
446 if (kvk)
447 free_keyval(kvk);
448 if (kvv)
449 free_keyval(kvv);
450 return NULL;
453 /* Don't keep a local reference to the extended key or value. */
454 kvk->refcnt = 0;
455 kvv->refcnt = 0;
457 csr1212_associate_keyval(kvk, kvv);
458 csr1212_associate_keyval(kvs, kvk);
460 return kvs;
463 struct csr1212_keyval *csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
464 const void *data, size_t data_len)
466 struct csr1212_keyval *kv;
468 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
469 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
470 if (!kv)
471 return NULL;
473 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
474 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
476 if (data) {
477 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
480 return kv;
484 struct csr1212_keyval *csr1212_new_textual_descriptor_leaf(u_int8_t cwidth,
485 u_int16_t cset,
486 u_int16_t language,
487 const void *data,
488 size_t data_len)
490 struct csr1212_keyval *kv;
491 char *lstr;
493 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
494 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
495 if (!kv)
496 return NULL;
498 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
499 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
500 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
502 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
504 /* make sure last quadlet is zeroed out */
505 *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
507 /* don't copy the NUL terminator */
508 memcpy(lstr, data, data_len);
510 return kv;
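/* The table below maps every code point allowed by the IEEE 1212 minimal
 * ASCII character set to itself and everything else to 0, so a string is
 * valid only if each of its characters maps to itself. */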
513 static int csr1212_check_minimal_ascii(const char *s)
515 static const char minimal_ascii_table[] = {
516 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
517 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
518 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
519 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
520 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
521 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
522 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
523 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
524 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
525 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
526 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
527 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
528 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
529 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
530 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
531 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
533 for (; *s; s++) {
534 if (minimal_ascii_table[*s & 0x7F] != *s)
535 return -1; /* failed */
537 /* String conforms to minimal-ascii, as specified by IEEE 1212,
538 * par. 7.4 */
539 return 0;
542 struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
544 /* Check whether the string conforms to the minimal-ASCII format */
545 if (csr1212_check_minimal_ascii(s))
546 return NULL;
548 /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
549 return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
552 struct csr1212_keyval *csr1212_new_icon_descriptor_leaf(u_int32_t version,
553 u_int8_t palette_depth,
554 u_int8_t color_space,
555 u_int16_t language,
556 u_int16_t hscan,
557 u_int16_t vscan,
558 u_int32_t *palette,
559 u_int32_t *pixels)
561 static const int pd[4] = { 0, 4, 16, 256 };
562 static const int cs[16] = { 4, 2 };
563 struct csr1212_keyval *kv;
564 int palette_size;
565 int pixel_size = (hscan * vscan + 3) & ~0x3;
567 if (!pixels || (!palette && palette_depth) ||
568 (palette_depth & ~0x3) || (color_space & ~0xf))
569 return NULL;
571 palette_size = pd[palette_depth] * cs[color_space];
573 kv = csr1212_new_descriptor_leaf(1, 0, NULL,
574 palette_size + pixel_size +
575 CSR1212_ICON_DESCRIPTOR_LEAF_OVERHEAD);
576 if (!kv)
577 return NULL;
579 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VERSION(kv, version);
580 CSR1212_ICON_DESCRIPTOR_LEAF_SET_PALETTE_DEPTH(kv, palette_depth);
581 CSR1212_ICON_DESCRIPTOR_LEAF_SET_COLOR_SPACE(kv, color_space);
582 CSR1212_ICON_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
583 CSR1212_ICON_DESCRIPTOR_LEAF_SET_HSCAN(kv, hscan);
584 CSR1212_ICON_DESCRIPTOR_LEAF_SET_VSCAN(kv, vscan);
586 if (palette_size)
587 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PALETTE(kv), palette,
588 palette_size);
590 memcpy(CSR1212_ICON_DESCRIPTOR_LEAF_PIXELS(kv), pixels, pixel_size);
592 return kv;
595 struct csr1212_keyval *csr1212_new_modifiable_descriptor_leaf(u_int16_t max_size,
596 u_int64_t address)
598 struct csr1212_keyval *kv;
600 /* IEEE 1212, par. 7.5.4.3 Modifiable descriptors */
601 kv = csr1212_new_leaf(CSR1212_KV_ID_MODIFIABLE_DESCRIPTOR, NULL, sizeof(u_int64_t));
602 if(!kv)
603 return NULL;
605 CSR1212_MODIFIABLE_DESCRIPTOR_SET_MAX_SIZE(kv, max_size);
606 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_HI(kv, address);
607 CSR1212_MODIFIABLE_DESCRIPTOR_SET_ADDRESS_LO(kv, address);
609 return kv;
612 static int csr1212_check_keyword(const char *s)
614 for (; *s; s++) {
616 if (('A' <= *s) && (*s <= 'Z'))
617 continue;
618 if (('0' <= *s) && (*s <= '9'))
619 continue;
620 if (*s == '-')
621 continue;
623 return -1; /* failed */
625 /* String conforms to keyword, as specified by IEEE 1212,
626 * par. 7.6.5 */
627 return CSR1212_SUCCESS;
630 struct csr1212_keyval *csr1212_new_keyword_leaf(int strc, const char *strv[])
632 struct csr1212_keyval *kv;
633 char *buffer;
634 int i, data_len = 0;
636 /* Check all keywords to see if they conform to restrictions:
637 * Only the following characters are allowed: 'A'..'Z', '0'..'9', '-'.
638 * Each word is zero-terminated.
639 * Also calculate the total length of the keywords.
641 for (i = 0; i < strc; i++) {
642 if (!strv[i] || csr1212_check_keyword(strv[i])) {
643 return NULL;
645 data_len += strlen(strv[i]) + 1; /* Add zero-termination char. */
648 /* IEEE 1212, par. 7.6.5 Keyword leaves */
649 kv = csr1212_new_leaf(CSR1212_KV_ID_KEYWORD, NULL, data_len);
650 if (!kv)
651 return NULL;
653 buffer = (char *)kv->value.leaf.data;
655 /* make sure last quadlet is zeroed out */
656 *((u_int32_t*)&(buffer[(data_len - 1) & ~0x3])) = 0;
658 /* Copy keyword(s) into leaf data buffer */
659 for (i = 0; i < strc; i++) {
660 int len = strlen(strv[i]) + 1;
661 memcpy(buffer, strv[i], len);
662 buffer += len;
664 return kv;
668 /* Destruction Routines */
670 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
671 struct csr1212_keyval *kv)
673 struct csr1212_dentry *dentry;
675 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
676 return;
678 dentry = csr1212_find_keyval(dir, kv);
680 if (!dentry)
681 return;
683 if (dentry->prev)
684 dentry->prev->next = dentry->next;
685 if (dentry->next)
686 dentry->next->prev = dentry->prev;
687 if (dir->value.directory.dentries_head == dentry)
688 dir->value.directory.dentries_head = dentry->next;
689 if (dir->value.directory.dentries_tail == dentry)
690 dir->value.directory.dentries_tail = dentry->prev;
692 CSR1212_FREE(dentry);
694 csr1212_release_keyval(kv);
698 void csr1212_disassociate_keyval(struct csr1212_keyval *kv)
700 if (kv->associate) {
701 csr1212_release_keyval(kv->associate);
704 kv->associate = NULL;
708 /* This function is used to free the memory taken by a keyval. If the given
709 * keyval is a directory type, then any keyvals contained in that directory
710 * will be destroyed as well if their respective refcnts are 0. By means of
711 * list manipulation, this routine will descend a directory structure in a
712 * non-recursive manner. */
713 void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
715 struct csr1212_keyval *k, *a;
716 struct csr1212_dentry dentry;
717 struct csr1212_dentry *head, *tail;
719 dentry.kv = kv;
720 dentry.next = NULL;
721 dentry.prev = NULL;
723 head = &dentry;
724 tail = head;
726 while (head) {
727 k = head->kv;
729 while (k) {
730 k->refcnt--;
732 if (k->refcnt > 0)
733 break;
735 a = k->associate;
737 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
738 /* If the current entry is a directory, then move all
739 * the entries to the destruction list. */
740 if (k->value.directory.dentries_head) {
741 tail->next = k->value.directory.dentries_head;
742 k->value.directory.dentries_head->prev = tail;
743 tail = k->value.directory.dentries_tail;
746 free_keyval(k);
747 k = a;
750 head = head->next;
751 if (head) {
752 if (head->prev && head->prev != &dentry) {
753 CSR1212_FREE(head->prev);
755 head->prev = NULL;
756 } else if (tail != &dentry)
757 CSR1212_FREE(tail);
762 void csr1212_destroy_csr(struct csr1212_csr *csr)
764 struct csr1212_csr_rom_cache *c, *oc;
765 struct csr1212_cache_region *cr, *ocr;
767 csr1212_release_keyval(csr->root_kv);
769 c = csr->cache_head;
770 while (c) {
771 oc = c;
772 cr = c->filled_head;
773 while (cr) {
774 ocr = cr;
775 cr = cr->next;
776 CSR1212_FREE(ocr);
778 c = c->next;
779 CSR1212_FREE(oc);
782 CSR1212_FREE(csr);
787 /* CSR Image Creation */
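/* Allocate bus address space for an Extended ROM region of (at least)
 * 'romsize' bytes, wrap it in a new ROM cache appended to the cache list,
 * and attach a corresponding Extended_ROM leaf to the root directory. */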
789 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
791 struct csr1212_csr_rom_cache *cache;
792 u_int64_t csr_addr;
794 if (!csr || !csr->ops || !csr->ops->allocate_addr_range ||
795 !csr->ops->release_addr || csr->max_rom < 1)
796 return CSR1212_EINVAL;
798 /* ROM size must be a multiple of csr->max_rom */
799 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
801 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private_data);
802 if (csr_addr == ~0ULL) {
803 return CSR1212_ENOMEM;
805 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
806 /* Invalid address returned from allocate_addr_range(). */
807 csr->ops->release_addr(csr_addr, csr->private_data);
808 return CSR1212_ENOMEM;
811 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
812 if (!cache) {
813 csr->ops->release_addr(csr_addr, csr->private_data);
814 return CSR1212_ENOMEM;
817 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
818 if (!cache->ext_rom) {
819 csr->ops->release_addr(csr_addr, csr->private_data);
820 CSR1212_FREE(cache);
821 return CSR1212_ENOMEM;
824 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
825 csr1212_release_keyval(cache->ext_rom);
826 csr->ops->release_addr(csr_addr, csr->private_data);
827 CSR1212_FREE(cache);
828 return CSR1212_ENOMEM;
830 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
831 cache->ext_rom->value.leaf.len = -1;
832 cache->ext_rom->value.leaf.data = cache->data;
834 /* Add cache to tail of cache list */
835 cache->prev = csr->cache_tail;
836 csr->cache_tail->next = cache;
837 csr->cache_tail = cache;
838 return CSR1212_SUCCESS;
841 static inline void csr1212_remove_cache(struct csr1212_csr *csr,
842 struct csr1212_csr_rom_cache *cache)
844 if (csr->cache_head == cache)
845 csr->cache_head = cache->next;
846 if (csr->cache_tail == cache)
847 csr->cache_tail = cache->prev;
849 if (cache->prev)
850 cache->prev->next = cache->next;
851 if (cache->next)
852 cache->next->prev = cache->prev;
854 if (cache->ext_rom) {
855 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
856 csr1212_release_keyval(cache->ext_rom);
859 CSR1212_FREE(cache);
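/* Count the quadlet entries that directory 'dir' will need and splice any
 * leaf or directory keyvals it references onto the layout list ending at
 * *layout_tail, so csr1212_generate_positions() can assign them offsets.
 * Extended_Key_Specifier_ID/Extended_Key immediates that repeat the previous
 * value are skipped, and Extended ROM leaves are not added to the layout
 * list since they are laid out separately. */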
862 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
863 struct csr1212_keyval **layout_tail)
865 struct csr1212_dentry *dentry;
866 struct csr1212_keyval *dkv;
867 struct csr1212_keyval *last_extkey_spec = NULL;
868 struct csr1212_keyval *last_extkey = NULL;
869 int num_entries = 0;
871 for (dentry = dir->value.directory.dentries_head; dentry;
872 dentry = dentry->next) {
873 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
874 /* Special Case: Extended Key Specifier_ID */
875 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
876 if (last_extkey_spec == NULL) {
877 last_extkey_spec = dkv;
878 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
879 last_extkey_spec = dkv;
880 } else {
881 continue;
883 /* Special Case: Extended Key */
884 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
885 if (last_extkey == NULL) {
886 last_extkey = dkv;
887 } else if (dkv->value.immediate != last_extkey->value.immediate) {
888 last_extkey = dkv;
889 } else {
890 continue;
894 num_entries += 1;
896 switch(dkv->key.type) {
897 default:
898 case CSR1212_KV_TYPE_IMMEDIATE:
899 case CSR1212_KV_TYPE_CSR_OFFSET:
900 break;
901 case CSR1212_KV_TYPE_LEAF:
902 case CSR1212_KV_TYPE_DIRECTORY:
903 /* Remove from list */
904 if (dkv->prev && (dkv->prev->next == dkv))
905 dkv->prev->next = dkv->next;
906 if (dkv->next && (dkv->next->prev == dkv))
907 dkv->next->prev = dkv->prev;
908 //if (dkv == *layout_tail)
909 // *layout_tail = dkv->prev;
911 /* Special case: Extended ROM leaves */
912 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
913 dkv->value.leaf.len = -1;
914 /* Don't add Extended ROM leaves to the layout list,
915 * they are handled differently. */
916 break;
919 /* Add to tail of list */
920 dkv->next = NULL;
921 dkv->prev = *layout_tail;
922 (*layout_tail)->next = dkv;
923 *layout_tail = dkv;
924 break;
928 return num_entries;
931 size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
933 struct csr1212_keyval *ltail = kv;
934 size_t agg_size = 0;
936 while(kv) {
937 switch(kv->key.type) {
938 case CSR1212_KV_TYPE_LEAF:
939 /* Add 1 quadlet for crc/len field */
940 agg_size += kv->value.leaf.len + 1;
941 break;
943 case CSR1212_KV_TYPE_DIRECTORY:
944 kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
945 /* Add 1 quadlet for crc/len field */
946 agg_size += kv->value.directory.len + 1;
947 break;
949 kv = kv->next;
951 return quads_to_bytes(agg_size);
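/* Assign absolute ROM offsets to the keyvals on the layout list, starting at
 * 'start_pos' within 'cache', until the cache is full.  Records the cache's
 * layout head/tail and used length, and returns the first keyval that did
 * not fit (NULL if everything fit). */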
954 struct csr1212_keyval *csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
955 struct csr1212_keyval *start_kv,
956 int start_pos)
958 struct csr1212_keyval *kv = start_kv;
959 struct csr1212_keyval *okv = start_kv;
960 int pos = start_pos;
961 int kv_len = 0, okv_len = 0;
963 cache->layout_head = kv;
965 while(kv && pos < cache->size) {
966 /* Special case: Extended ROM leaves */
967 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
968 kv->offset = cache->offset + pos;
971 switch(kv->key.type) {
972 case CSR1212_KV_TYPE_LEAF:
973 kv_len = kv->value.leaf.len;
974 break;
976 case CSR1212_KV_TYPE_DIRECTORY:
977 kv_len = kv->value.directory.len;
978 break;
980 default:
981 /* Should never get here */
982 break;
985 pos += quads_to_bytes(kv_len + 1);
987 if (pos <= cache->size) {
988 okv = kv;
989 okv_len = kv_len;
990 kv = kv->next;
994 cache->layout_tail = okv;
995 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
997 return kv;
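/* Serialize the entries of directory 'dir' into 'data_buffer' as big-endian
 * quadlets: key id and type go in the most significant byte, and either the
 * immediate/CSR-offset value or the quadlet offset to the target leaf or
 * directory (relative to the entry's own position) goes in the remaining
 * bits. */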
1000 static void csr1212_generate_tree_subdir(struct csr1212_keyval *dir,
1001 u_int32_t *data_buffer)
1003 struct csr1212_dentry *dentry;
1004 struct csr1212_keyval *last_extkey_spec = NULL;
1005 struct csr1212_keyval *last_extkey = NULL;
1006 int index = 0;
1008 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
1009 struct csr1212_keyval *a;
1011 for (a = dentry->kv; a; a = a->associate) {
1012 u_int32_t value = 0;
1014 /* Special Case: Extended Key Specifier_ID */
1015 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
1016 if (last_extkey_spec == NULL) {
1017 last_extkey_spec = a;
1018 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
1019 last_extkey_spec = a;
1020 } else {
1021 continue;
1023 /* Special Case: Extended Key */
1024 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
1025 if (last_extkey == NULL) {
1026 last_extkey = a;
1027 } else if (a->value.immediate != last_extkey->value.immediate) {
1028 last_extkey = a;
1029 } else {
1030 continue;
1034 switch(a->key.type) {
1035 case CSR1212_KV_TYPE_IMMEDIATE:
1036 value = a->value.immediate;
1037 break;
1038 case CSR1212_KV_TYPE_CSR_OFFSET:
1039 value = a->value.csr_offset;
1040 break;
1041 case CSR1212_KV_TYPE_LEAF:
1042 value = a->offset;
1043 value -= dir->offset + quads_to_bytes(1+index);
1044 value = bytes_to_quads(value);
1045 break;
1046 case CSR1212_KV_TYPE_DIRECTORY:
1047 value = a->offset;
1048 value -= dir->offset + quads_to_bytes(1+index);
1049 value = bytes_to_quads(value);
1050 break;
1051 default:
1052 /* Should never get here */
1053 break; /* GDB breakpoint */
1056 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
1057 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
1058 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
1059 data_buffer[index] = CSR1212_CPU_TO_BE32(value);
1060 index++;
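/* Write the image of every keyval laid out in 'cache' into the cache buffer:
 * leaf data is copied (Extended ROM contents are already in place),
 * directory entries are generated, and the length/CRC header of each block
 * is filled in.  Keyvals are unlinked from the layout list as they are
 * processed. */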
1065 void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
1067 struct csr1212_keyval *kv, *nkv;
1068 struct csr1212_keyval_img *kvi;
1070 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
1071 kvi = (struct csr1212_keyval_img *)
1072 (cache->data + bytes_to_quads(kv->offset - cache->offset));
1073 switch(kv->key.type) {
1074 default:
1075 case CSR1212_KV_TYPE_IMMEDIATE:
1076 case CSR1212_KV_TYPE_CSR_OFFSET:
1077 /* Should never get here */
1078 break; /* GDB breakpoint */
1080 case CSR1212_KV_TYPE_LEAF:
1081 /* Don't copy over Extended ROM areas; they are
1082 * already filled out! */
1083 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1084 memcpy(kvi->data, kv->value.leaf.data,
1085 quads_to_bytes(kv->value.leaf.len));
1087 kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
1088 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
1089 break;
1091 case CSR1212_KV_TYPE_DIRECTORY:
1092 csr1212_generate_tree_subdir(kv, kvi->data);
1094 kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
1095 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
1096 break;
1099 nkv = kv->next;
1100 if (kv->prev)
1101 kv->prev->next = NULL;
1102 if (kv->next)
1103 kv->next->prev = NULL;
1104 kv->prev = NULL;
1105 kv->next = NULL;
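/* Generate the complete Config ROM image: fill in the bus info block header,
 * lay out all keyvals (growing into Extended ROM caches when the first 1 kB
 * is exhausted), drop unused caches, then fill each cache from the tail of
 * the list towards the head so that Extended ROM contents are final before
 * their leaf headers and CRCs are written. */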
1109 int csr1212_generate_csr_image(struct csr1212_csr *csr)
1111 struct csr1212_bus_info_block_img *bi;
1112 struct csr1212_csr_rom_cache *cache;
1113 struct csr1212_keyval *kv;
1114 size_t agg_size;
1115 int ret;
1116 int init_offset;
1118 if (!csr)
1119 return CSR1212_EINVAL;
1121 cache = csr->cache_head;
1123 bi = (struct csr1212_bus_info_block_img*)cache->data;
1125 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
1126 bi->crc_length = bi->length;
1127 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
1129 csr->root_kv->next = NULL;
1130 csr->root_kv->prev = NULL;
1132 agg_size = csr1212_generate_layout_order(csr->root_kv);
1134 init_offset = csr->bus_info_len;
1136 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
1137 if (!cache) {
1138 /* Estimate the approximate number of additional cache
1139 * regions needed (this assumes that the cache holding
1140 * the first 1K Config ROM space always exists). */
1141 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
1142 (2 * sizeof(u_int32_t))) + 1;
1144 /* Add additional cache regions, extras will be
1145 * removed later */
1146 for (; est_c; est_c--) {
1147 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
1148 if (ret != CSR1212_SUCCESS)
1149 return ret;
1151 /* Need to re-layout for additional cache regions */
1152 agg_size = csr1212_generate_layout_order(csr->root_kv);
1153 kv = csr->root_kv;
1154 cache = csr->cache_head;
1155 init_offset = csr->bus_info_len;
1157 kv = csr1212_generate_positions(cache, kv, init_offset);
1158 agg_size -= cache->len;
1159 init_offset = sizeof(u_int32_t);
1162 /* Remove unused, excess cache regions */
1163 while (cache) {
1164 struct csr1212_csr_rom_cache *oc = cache;
1166 cache = cache->next;
1167 csr1212_remove_cache(csr, oc);
1170 /* Go through the list backward so that when done, the correct CRC
1171 * will be calculated for the Extended ROM areas. */
1172 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1173 /* Only Extended ROM caches should have this set. */
1174 if (cache->ext_rom) {
1175 int leaf_size;
1177 /* Make sure the Extended ROM leaf is a multiple of
1178 * max_rom in size. */
1179 if (csr->max_rom < 1)
1180 return CSR1212_EINVAL;
1181 leaf_size = (cache->len + (csr->max_rom - 1)) &
1182 ~(csr->max_rom - 1);
1184 /* Zero out the unused ROM region */
1185 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1186 leaf_size - cache->len);
1188 /* Subtract leaf header */
1189 leaf_size -= sizeof(u_int32_t);
1191 /* Update the Extended ROM leaf length */
1192 cache->ext_rom->value.leaf.len =
1193 bytes_to_quads(leaf_size);
1194 } else {
1195 /* Zero out the unused ROM region */
1196 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1197 cache->size - cache->len);
1200 /* Copy the data into the cache buffer */
1201 csr1212_fill_cache(cache);
1203 if (cache != csr->cache_head) {
1204 /* Set the length and CRC of the extended ROM. */
1205 struct csr1212_keyval_img *kvi =
1206 (struct csr1212_keyval_img*)cache->data;
1208 kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1);
1209 kvi->crc = csr1212_crc16(kvi->data,
1210 bytes_to_quads(cache->len) - 1);
1215 return CSR1212_SUCCESS;
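/* Copy 'len' bytes starting at Config ROM offset 'offset' into 'buffer',
 * provided a single cache holds the whole range; otherwise return
 * CSR1212_ENOENT. */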
1218 int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
1220 struct csr1212_csr_rom_cache *cache;
1222 for (cache = csr->cache_head; cache; cache = cache->next) {
1223 if (offset >= cache->offset &&
1224 (offset + len) <= (cache->offset + cache->size)) {
1225 memcpy(buffer,
1226 &cache->data[bytes_to_quads(offset - cache->offset)],
1227 len);
1228 return CSR1212_SUCCESS;
1231 return CSR1212_ENOENT;
1236 /* Parse a chunk of data as a Config ROM */
1238 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1240 struct csr1212_bus_info_block_img *bi;
1241 struct csr1212_cache_region *cr;
1242 int i;
1243 int ret;
1245 /* IEEE 1212 says that the entire bus info block should be readable in
1246 * a single transaction regardless of the max_rom value.
1247 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1248 * bus info block will be read 1 quadlet at a time. The rest of the
1249 * ConfigROM will be read according to the max_rom field. */
1250 for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
1251 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1252 sizeof(csr1212_quad_t),
1253 &csr->cache_head->data[bytes_to_quads(i)],
1254 csr->private_data);
1255 if (ret != CSR1212_SUCCESS)
1256 return ret;
1259 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1260 csr->crc_len = quads_to_bytes(bi->crc_length);
1262 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1263 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1264 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
1265 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1266 sizeof(csr1212_quad_t),
1267 &csr->cache_head->data[bytes_to_quads(i)],
1268 csr->private_data);
1269 if (ret != CSR1212_SUCCESS)
1270 return ret;
1273 if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
1274 return CSR1212_EINVAL;
1276 #if 0
1277 /* Apparently there are so many different wrong implementations of the
1278 * CRC algorithm that verifying them is moot. */
1279 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1280 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1281 return CSR1212_EINVAL;
1282 #endif
1284 cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1285 if (!cr)
1286 return CSR1212_ENOMEM;
1288 cr->next = NULL;
1289 cr->prev = NULL;
1290 cr->offset_start = 0;
1291 cr->offset_end = csr->crc_len + 4;
1293 csr->cache_head->filled_head = cr;
1294 csr->cache_head->filled_tail = cr;
1296 return CSR1212_SUCCESS;
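/* Decode one directory entry quadlet 'ki' found at ROM position 'kv_pos'.
 * Immediate and CSR-offset entries become new keyvals; leaf and directory
 * entries are resolved to an absolute offset and either matched to a keyval
 * already on the circular list or created as an unread placeholder.  The
 * resulting keyval is attached to 'dir'. */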
1299 static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1300 csr1212_quad_t ki,
1301 u_int32_t kv_pos)
1303 int ret = CSR1212_SUCCESS;
1304 struct csr1212_keyval *k = NULL;
1305 u_int32_t offset;
1307 switch(CSR1212_KV_KEY_TYPE(ki)) {
1308 case CSR1212_KV_TYPE_IMMEDIATE:
1309 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1310 CSR1212_KV_VAL(ki));
1311 if (!k) {
1312 ret = CSR1212_ENOMEM;
1313 goto fail;
1316 k->refcnt = 0; /* Don't keep local reference when parsing. */
1317 break;
1319 case CSR1212_KV_TYPE_CSR_OFFSET:
1320 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1321 CSR1212_KV_VAL(ki));
1322 if (!k) {
1323 ret = CSR1212_ENOMEM;
1324 goto fail;
1326 k->refcnt = 0; /* Don't keep local reference when parsing. */
1327 break;
1329 default:
1330 /* Compute the offset from 0xffff f000 0000. */
1331 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1332 if (offset == kv_pos) {
1333 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1334 * or Directories. The Config ROM image is most likely
1335 * messed up, so we'll just abort here. */
1336 ret = CSR1212_EIO;
1337 goto fail;
1340 k = csr1212_find_keyval_offset(dir, offset);
1342 if (k)
1343 break; /* Found it. */
1345 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1346 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1347 } else {
1348 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1350 if (!k) {
1351 ret = CSR1212_ENOMEM;
1352 goto fail;
1354 k->refcnt = 0; /* Don't keep local reference when parsing. */
1355 k->valid = 0; /* Contents not read yet so it's not valid. */
1356 k->offset = offset;
1358 k->prev = dir;
1359 k->next = dir->next;
1360 dir->next->prev = k;
1361 dir->next = k;
1363 ret = csr1212_attach_keyval_to_directory(dir, k);
1365 fail:
1366 if (ret != CSR1212_SUCCESS) {
1367 if (k)
1368 free_keyval(k);
1370 return ret;
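/* Decode the raw image of 'kv', which must already be present in 'cache':
 * directories get each non-null entry parsed via csr1212_parse_dir_entry()
 * and leaves (other than Extended ROM) get their data copied out.  Marks
 * the keyval valid. */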
1374 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1375 struct csr1212_csr_rom_cache *cache)
1377 struct csr1212_keyval_img *kvi;
1378 int i;
1379 int ret = CSR1212_SUCCESS;
1380 int kvi_len;
1382 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1383 cache->offset)];
1384 kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
1386 #if 0
1387 /* Apparently there are so many different wrong implementations of the
1388 * CRC algorithm that verifying them is moot. */
1389 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1390 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1391 ret = CSR1212_EINVAL;
1392 goto fail;
1394 #endif
1396 switch(kv->key.type) {
1397 case CSR1212_KV_TYPE_DIRECTORY:
1398 for (i = 0; i < kvi_len; i++) {
1399 csr1212_quad_t ki = kvi->data[i];
1401 /* Some devices put null entries in their unit
1402 * directories. If we come across such an entry,
1403 * then skip it. */
1404 if (ki == 0x0)
1405 continue;
1406 ret = csr1212_parse_dir_entry(kv, ki,
1407 (kv->offset +
1408 quads_to_bytes(i + 1)));
1410 kv->value.directory.len = kvi_len;
1411 break;
1413 case CSR1212_KV_TYPE_LEAF:
1414 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1415 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1416 if (!kv->value.leaf.data)
1418 ret = CSR1212_ENOMEM;
1419 goto fail;
1422 kv->value.leaf.len = kvi_len;
1423 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
1425 break;
1428 kv->valid = 1;
1430 fail:
1431 return ret;
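/* Fetch the quadlets backing 'kv' from the device into the appropriate cache
 * (creating a new cache if 'kv' is an Extended ROM leaf outside all existing
 * ones), reading in max_rom-sized chunks and falling back to quadlet reads
 * if those fail.  The filled_head/filled_tail regions track which parts of
 * the cache are valid so data is only fetched once; adjacent regions are
 * merged.  Finally the keyval is parsed. */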
1435 int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1437 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1438 struct csr1212_keyval_img *kvi = NULL;
1439 struct csr1212_csr_rom_cache *cache;
1440 int cache_index;
1441 u_int64_t addr;
1442 u_int32_t *cache_ptr;
1443 u_int16_t kv_len = 0;
1445 if (!csr || !kv || csr->max_rom < 1)
1446 return CSR1212_EINVAL;
1448 /* First find which cache the data should be in (or go in if not read
1449 * yet). */
1450 for (cache = csr->cache_head; cache; cache = cache->next) {
1451 if (kv->offset >= cache->offset &&
1452 kv->offset < (cache->offset + cache->size))
1453 break;
1456 if (!cache) {
1457 csr1212_quad_t q;
1458 u_int32_t cache_size;
1460 /* Only create a new cache for Extended ROM leaves. */
1461 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1462 return CSR1212_EINVAL;
1464 if (csr->ops->bus_read(csr,
1465 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1466 sizeof(csr1212_quad_t), &q, csr->private_data)) {
1467 return CSR1212_EIO;
1470 kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16;
1472 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1473 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1475 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1476 if (!cache)
1477 return CSR1212_ENOMEM;
1479 kv->value.leaf.data = &cache->data[1];
1480 csr->cache_tail->next = cache;
1481 cache->prev = csr->cache_tail;
1482 cache->next = NULL;
1483 csr->cache_tail = cache;
1484 cache->filled_head =
1485 CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1486 if (!cache->filled_head) {
1487 return CSR1212_ENOMEM;
1490 cache->filled_head->offset_start = 0;
1491 cache->filled_head->offset_end = sizeof(csr1212_quad_t);
1492 cache->filled_tail = cache->filled_head;
1493 cache->filled_head->next = NULL;
1494 cache->filled_head->prev = NULL;
1495 cache->data[0] = q;
1497 /* Don't read the entire extended ROM now. Pieces of it will
1498 * be read when entries inside it are read. */
1499 return csr1212_parse_keyval(kv, cache);
1502 cache_index = kv->offset - cache->offset;
1504 /* Now search the already-read portions of the cache to see if the data is there. */
1505 for (cr = cache->filled_head; cr; cr = cr->next) {
1506 if (cache_index < cr->offset_start) {
1507 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1508 if (!newcr)
1509 return CSR1212_ENOMEM;
1511 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1512 newcr->offset_end = newcr->offset_start;
1513 newcr->next = cr;
1514 newcr->prev = cr->prev;
1515 cr->prev = newcr;
1516 cr = newcr;
1517 break;
1518 } else if ((cache_index >= cr->offset_start) &&
1519 (cache_index < cr->offset_end)) {
1520 kvi = (struct csr1212_keyval_img*)
1521 (&cache->data[bytes_to_quads(cache_index)]);
1522 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1523 1);
1524 break;
1525 } else if (cache_index == cr->offset_end)
1526 break;
1529 if (!cr) {
1530 cr = cache->filled_tail;
1531 newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
1532 if (!newcr)
1533 return CSR1212_ENOMEM;
1535 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1536 newcr->offset_end = newcr->offset_start;
1537 newcr->prev = cr;
1538 newcr->next = cr->next;
1539 cr->next = newcr;
1540 cr = newcr;
1541 cache->filled_tail = newcr;
1544 while(!kvi || cr->offset_end < cache_index + kv_len) {
1545 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1546 ~(csr->max_rom - 1))];
1548 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1549 cr->offset_end) & ~(csr->max_rom - 1);
1551 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1552 csr->private_data)) {
1553 if (csr->max_rom == 4)
1554 /* We've got problems! */
1555 return CSR1212_EIO;
1557 /* Apparently the max_rom value was a lie; fall back to
1558 * quadlet reads and try again. */
1559 csr->max_rom = 4;
1560 continue;
1563 cr->offset_end += csr->max_rom - (cr->offset_end &
1564 (csr->max_rom - 1));
1566 if (!kvi && (cr->offset_end > cache_index)) {
1567 kvi = (struct csr1212_keyval_img*)
1568 (&cache->data[bytes_to_quads(cache_index)]);
1569 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1570 1);
1573 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1574 /* The Leaf or Directory claims its length extends
1575 * beyond the ConfigROM image region and thus beyond the
1576 * end of our cache region. Therefore, we abort now
1577 * rather than seg faulting later. */
1578 return CSR1212_EIO;
1581 ncr = cr->next;
1583 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1584 /* consolidate region entries */
1585 ncr->offset_start = cr->offset_start;
1587 if (cr->prev)
1588 cr->prev->next = cr->next;
1589 ncr->prev = cr->prev;
1590 if (cache->filled_head == cr)
1591 cache->filled_head = ncr;
1592 CSR1212_FREE(cr);
1593 cr = ncr;
1597 return csr1212_parse_keyval(kv, cache);
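/* Top-level Config ROM parser: read the bus info block, force quadlet-sized
 * reads (see the comment below about buggy firmware), set up and read the
 * root directory at its fixed offset, then read any Extended ROM leaves
 * referenced from the root directory so caches exist for them. */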
1602 int csr1212_parse_csr(struct csr1212_csr *csr)
1604 struct csr1212_dentry *dentry;
1605 int ret;
1607 if (!csr || !csr->ops || !csr->ops->bus_read)
1608 return CSR1212_EINVAL;
1610 ret = csr1212_parse_bus_info_block(csr);
1611 if (ret != CSR1212_SUCCESS)
1612 return ret;
1615 * Buggy firmware with bus_info_block.max_rom > 0 has been spotted
1616 * which actually only supports quadlet read requests to the config
1617 * ROM. Therefore read everything quadlet by quadlet regardless
1618 * of what the bus info block says. This mirrors a similar change
1619 * made in the Linux kernel around 4 Jan 2009. See
1620 * http://git.kernel.org/linus/0bed1819687b50a7
1621 * The other hunks in that diff are cleanups - removal of things
1622 * which aren't needed now that max_rom is fixed at 4. In time it
1623 * may be worthwhile merging them too.
1625 csr->max_rom = 4;
1627 csr->cache_head->layout_head = csr->root_kv;
1628 csr->cache_head->layout_tail = csr->root_kv;
1630 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1631 csr->bus_info_len;
1633 csr->root_kv->valid = 0;
1634 csr->root_kv->next = csr->root_kv;
1635 csr->root_kv->prev = csr->root_kv;
1636 csr1212_get_keyval(csr, csr->root_kv);
1638 /* Scan through the Root directory finding all extended ROM regions
1639 * and make cache regions for them */
1640 for (dentry = csr->root_kv->value.directory.dentries_head;
1641 dentry; dentry = dentry->next) {
1642 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
1643 csr1212_get_keyval(csr, dentry->kv);
1645 if (ret != CSR1212_SUCCESS)
1646 return ret;
1650 return CSR1212_SUCCESS;