/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
 * public, protected, and private methods.
 */
#include "remote_node_table.h"
#include "remote_node_context.h"
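/*
 * Layout notes (summarized from the routines below; the exact macro values
 * are defined in remote_node_table.h):
 *
 *   - available_remote_nodes tracks individual remote node indexes (RNis).
 *     Each group of SCU_STP_REMOTE_NODE_COUNT consecutive RNis occupies a
 *     4-bit slot within a dword, which is why the helpers below shift by
 *     (dword_remainder * 4).
 *
 *   - remote_node_groups holds one bitmap per "selector": selector 0 lists
 *     groups with a single free RNi, selector 1 lists groups with two free
 *     RNis, and selector 2 lists completely free groups that can satisfy a
 *     triple (SCU_STP_REMOTE_NODE_COUNT) allocation.
 */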
/**
 * sci_remote_node_table_get_group_index()
 * @remote_node_table: This is the remote node index table from which the
 *    selection will be made.
 * @group_table_index: This is the index to the group table from which to
 *    search for an available selection.
 *
 * This routine finds the first available group in the selected group table
 * and returns its position in absolute bit terms (dword index * 32 plus the
 * bit index within that dword).
 *
 * Return: the absolute bit position of an available group, or
 * SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is available.
 */
static u32 sci_remote_node_table_get_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 dword_index;
	u32 *group_table;
	u32 bit_index;

	group_table = remote_node_table->remote_node_groups[group_table_index];

	for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
		if (group_table[dword_index] != 0) {
			for (bit_index = 0; bit_index < 32; bit_index++) {
				if ((group_table[dword_index] & (1 << bit_index)) != 0)
					return (dword_index * 32) + bit_index;
			}
		}
	}

	return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
}
/**
 * sci_remote_node_table_clear_group_index()
 * @remote_node_table: This is the remote node table in which to clear the
 *    selector.
 * @group_table_index: This is the remote node selector in which the change
 *    will be made.
 * @group_index: This is the bit index in the table to be modified.
 *
 * This method clears the group index entry in the specified group index
 * table.
 */
static void sci_remote_node_table_clear_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 dword_index;
	u32 bit_index;
	u32 *group_table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	dword_index = group_index / 32;
	bit_index = group_index % 32;
	group_table = remote_node_table->remote_node_groups[group_table_index];

	group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
}
/**
 * sci_remote_node_table_set_group_index()
 * @remote_node_table: This is the remote node table in which to set the
 *    selector.
 * @group_table_index: This is the remote node selector in which the change
 *    will be made.
 * @group_index: This is the bit position in the table to be modified.
 *
 * This method sets the group index bit entry in the specified group index
 * table.
 */
static void sci_remote_node_table_set_group_index(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index,
	u32 group_index)
{
	u32 dword_index;
	u32 bit_index;
	u32 *group_table;

	BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
	BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));

	dword_index = group_index / 32;
	bit_index = group_index % 32;
	group_table = remote_node_table->remote_node_groups[group_table_index];

	group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
}
/**
 * sci_remote_node_table_set_node_index()
 * @remote_node_table: This is the remote node table in which to modify
 *    the remote node availability.
 * @remote_node_index: This is the remote node index that is being returned to
 *    the table.
 *
 * This method marks the remote node as available in the remote node
 * allocation table.
 */
static void sci_remote_node_table_set_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_normalized;
	u32 slot_position;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] |=
		1 << (slot_normalized + slot_position);
}
/**
 * sci_remote_node_table_clear_node_index()
 * @remote_node_table: This is the remote node table from which to clear
 *    the available remote node bit.
 * @remote_node_index: This is the remote node index which is to be cleared
 *    from the table.
 *
 * This method clears the remote node index from the table of available remote
 * nodes.
 */
static void sci_remote_node_table_clear_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 slot_position;
	u32 slot_normalized;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
	dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
	slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
	slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;

	remote_node_table->available_remote_nodes[dword_location] &=
		~(1 << (slot_normalized + slot_position));
}
/**
 * sci_remote_node_table_clear_group()
 * @remote_node_table: The remote node table from which the slot will be
 *    cleared.
 * @group_index: The index for the slot that is to be cleared.
 *
 * This method clears the entire table slot at the specified slot index.
 */
static void sci_remote_node_table_clear_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	remote_node_table->available_remote_nodes[dword_location] = dword_value;
}
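/*
 * Clearing the group's 4-bit slot marks every node in that group as
 * allocated in available_remote_nodes; this is how a whole group is
 * consumed at once by sci_remote_node_table_allocate_triple_remote_node()
 * below.
 */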
/**
 * sci_remote_node_table_set_group()
 * @remote_node_table: The remote node table in which the slot will be set.
 * @group_index: The index for the slot that is to be set.
 *
 * This method sets an entire remote node group in the remote node table.
 */
static void sci_remote_node_table_set_group(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	BUG_ON(
		(remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
		<= (group_index / SCU_STP_REMOTE_NODE_COUNT)
		);

	dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	remote_node_table->available_remote_nodes[dword_location] = dword_value;
}
/**
 * sci_remote_node_table_get_group_value()
 * @remote_node_table: This is the remote node table for which the group
 *    value is to be returned.
 * @group_index: This is the group index to use to find the group value.
 *
 * This method returns the group value for the specified group index.
 *
 * Return: the bit values at the specified remote node group index.
 */
static u8 sci_remote_node_table_get_group_value(
	struct sci_remote_node_table *remote_node_table,
	u32 group_index)
{
	u32 dword_location;
	u32 dword_remainder;
	u32 dword_value;

	dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
	dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;

	dword_value = remote_node_table->available_remote_nodes[dword_location];
	dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
	dword_value = dword_value >> (dword_remainder * 4);

	return (u8)dword_value;
}
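/*
 * The returned group value is the raw availability nibble: a value of
 * SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE means every node in the group
 * is free, 0x00 means the group is fully allocated, and anything in between
 * indicates a partially used group.
 */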
/**
 * sci_remote_node_table_initialize()
 * @remote_node_table: The remote node table which is to be initialized.
 * @remote_node_entries: The number of entries to put in the table.
 *
 * This method initializes the remote node table for use.
 */
void sci_remote_node_table_initialize(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_entries)
{
	u32 index;

	/*
	 * Initialize the raw data.  We could improve the speed by only
	 * initializing those entries that are actually going to be used. */
	memset(
		remote_node_table->available_remote_nodes,
		0x00,
		sizeof(remote_node_table->available_remote_nodes)
		);

	memset(
		remote_node_table->remote_node_groups,
		0x00,
		sizeof(remote_node_table->remote_node_groups)
		);

	/* Initialize the available remote node sets */
	remote_node_table->available_nodes_array_size = (u16)
		(remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
		+ ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);

	/* Initialize each full DWORD to a FULL SET of remote nodes */
	for (index = 0; index < remote_node_entries; index++) {
		sci_remote_node_table_set_node_index(remote_node_table, index);
	}

	remote_node_table->group_array_size = (u16)
		(remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
		+ ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);

	for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
		/*
		 * These are all guaranteed to be full slot values so fill them
		 * in the available sets of 3 remote nodes. */
		sci_remote_node_table_set_group_index(remote_node_table, 2, index);
	}

	/* Now fill in any remainders that we may find */
	if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
		sci_remote_node_table_set_group_index(remote_node_table, 1, index);
	} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
		sci_remote_node_table_set_group_index(remote_node_table, 0, index);
	}
}
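/*
 * Illustrative example (hypothetical entry count): with remote_node_entries
 * of 8, groups 0 and 1 are completely free and land in selector table 2,
 * while the two leftover nodes make group 2 a dual entry in selector
 * table 1.
 */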
/**
 * sci_remote_node_table_allocate_single_remote_node()
 * @remote_node_table: The remote node table from which to allocate a
 *    remote node.
 * @group_table_index: The group index that is to be used for the search.
 *
 * This method will allocate a single RNi from the remote node table.  The
 * table index will determine from which remote node group table to search.
 * This search may fail and another group node table can be specified.  The
 * function is designed to allow a search of the available single remote node
 * group up to the triple remote node group.  If an entry is found in the
 * specified table the remote node is removed and the remote node groups are
 * updated.
 *
 * Return: the RNi value, or an invalid remote node context if an RNi cannot
 * be found.
 */
static u16 sci_remote_node_table_allocate_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u8 index;
	u8 group_value;
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	/* We could not find an available slot in the table selector 0 */
	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		group_value = sci_remote_node_table_get_group_value(
			remote_node_table, group_index);

		for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
			if (((1 << index) & group_value) != 0) {
				/* We have selected a bit now clear it */
				remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
							  + index);

				sci_remote_node_table_clear_group_index(
					remote_node_table, group_table_index, group_index
					);

				sci_remote_node_table_clear_node_index(
					remote_node_table, remote_node_index
					);

				if (group_table_index > 0) {
					sci_remote_node_table_set_group_index(
						remote_node_table, group_table_index - 1, group_index
						);
				}

				break;
			}
		}
	}

	return remote_node_index;
}
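/*
 * Note that taking one RNi out of a group leaves it with one fewer free
 * node, so the group is demoted to the next lower selector table
 * (group_table_index - 1) unless it was already in the single-node table.
 */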
/**
 * sci_remote_node_table_allocate_triple_remote_node()
 * @remote_node_table: This is the remote node table from which to allocate the
 *    remote node entries.
 * @group_table_index: This is the group table index which must equal two (2)
 *    for this operation.
 *
 * This method will allocate three consecutive remote node context entries. If
 * there are no remaining triple entries the function will return a failure.
 *
 * Return: the remote node index that represents three consecutive remote node
 * entries, or an invalid remote node context if none can be found.
 */
static u16 sci_remote_node_table_allocate_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 group_table_index)
{
	u32 group_index;
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	group_index = sci_remote_node_table_get_group_index(
		remote_node_table, group_table_index);

	if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
		remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;

		sci_remote_node_table_clear_group_index(
			remote_node_table, group_table_index, group_index
			);

		sci_remote_node_table_clear_group(
			remote_node_table, group_index
			);
	}

	return remote_node_index;
}
/**
 * sci_remote_node_table_allocate_remote_node()
 * @remote_node_table: This is the remote node table from which the remote node
 *    allocation is to take place.
 * @remote_node_count: This is the remote node count which is one of
 *    SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
 *
 * This method will allocate a remote node that matches the remote node count
 * specified by the caller.  Valid values for the remote node count are
 * SCU_SSP_REMOTE_NODE_COUNT(1) and SCU_STP_REMOTE_NODE_COUNT(3).
 *
 * Return: the remote node index that is allocated, or an invalid remote node
 * context.
 */
u16 sci_remote_node_table_allocate_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count)
{
	u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;

	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		/*
		 * Prefer groups that are already partially used (selector 0)
		 * before breaking up a fuller group. */
		remote_node_index =
			sci_remote_node_table_allocate_single_remote_node(
				remote_node_table, 0);

		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, 1);
		}

		if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
			remote_node_index =
				sci_remote_node_table_allocate_single_remote_node(
					remote_node_table, 2);
		}
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		remote_node_index =
			sci_remote_node_table_allocate_triple_remote_node(
				remote_node_table, 2);
	}

	return remote_node_index;
}
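/*
 * Illustrative usage sketch (rnt is a hypothetical pointer to an initialized
 * struct sci_remote_node_table; the real call sites live elsewhere in the
 * driver):
 *
 *	u16 rni = sci_remote_node_table_allocate_remote_node(rnt,
 *				SCU_SSP_REMOTE_NODE_COUNT);
 *
 *	if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
 *		... use the remote node context at index rni ...
 *		sci_remote_node_table_release_remote_node_index(rnt,
 *				SCU_SSP_REMOTE_NODE_COUNT, rni);
 *	}
 */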
/**
 * sci_remote_node_table_release_single_remote_node()
 * @remote_node_table: This is the remote node table from which the remote node
 *    release is to take place.
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will free a single remote node index back to the remote node
 * table.  This routine will also update the remote node groups.
 */
static void sci_remote_node_table_release_single_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;
	u8 group_value;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);

	/*
	 * Assert that we are not trying to add an entry to a slot that is
	 * already full. */
	BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);

	if (group_value == 0x00) {
		/*
		 * There are no entries in this slot so it must be added to the
		 * single slot table. */
		sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
	} else if ((group_value & (group_value - 1)) == 0) {
		/*
		 * There is only one entry in this slot (the group value is a
		 * power of two) so it must be moved from the single slot table
		 * to the dual slot table. */
		sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
	} else {
		/*
		 * There are two entries in the slot so it must be moved from
		 * the dual slot table to the triple slot table. */
		sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
		sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
	}

	sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
}
/**
 * sci_remote_node_table_release_triple_remote_node()
 * @remote_node_table: This is the remote node table to which the remote node
 *    index is to be freed.
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will release a group of three consecutive remote nodes back to
 * the free remote nodes.
 */
static void sci_remote_node_table_release_triple_remote_node(
	struct sci_remote_node_table *remote_node_table,
	u16 remote_node_index)
{
	u32 group_index;

	group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;

	sci_remote_node_table_set_group_index(
		remote_node_table, 2, group_index
		);

	sci_remote_node_table_set_group(remote_node_table, group_index);
}
/**
 * sci_remote_node_table_release_remote_node_index()
 * @remote_node_table: The remote node table to which the remote node index is
 *    to be freed.
 * @remote_node_count: This is the count of consecutive remote nodes that are
 *    to be freed.
 * @remote_node_index: This is the remote node index that is being released.
 *
 * This method will release the remote node index back into the remote node
 * table free pool.
 */
void sci_remote_node_table_release_remote_node_index(
	struct sci_remote_node_table *remote_node_table,
	u32 remote_node_count,
	u16 remote_node_index)
{
	if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_single_remote_node(
			remote_node_table, remote_node_index);
	} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
		sci_remote_node_table_release_triple_remote_node(
			remote_node_table, remote_node_index);
	}
}