/*
 * Part of gwm, the Gratuitous Window Manager,
 * by Gary Wong, <gtw@gnu.org>.
 *
 * Copyright (C) 2009 Gary Wong
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of version 3 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
36 #include "window-table.h"
/* Cuckoo hashed window table: see _Cuckoo Hashing_, R. Pagh and F. Rodler,
   Journal of Algorithms 51 (2004), pp. 122-144. */
41 struct window_table windows
, update_windows
;
/* table_init: seed the hash multiplier, allocate both cuckoo half-tables
   in a single zeroed block (t[ 1 ] points into the second half of the
   t[ 0 ] allocation), and allocate the parallel values array of
   table->n entries.
   NOTE(review): extraction damage -- the leading integers below are
   original line numbers fused into the code, and original lines 44-47,
   49 and 53-55 are absent (presumably the initialisation of table->n,
   table->used and table->max, plus the closing brace -- TODO confirm
   against pristine source before compiling). */
43 extern INIT
void table_init( struct window_table
*table
) {
48 table
->hash
= 0xBA97D5B1UL
;
50 table
->t
[ 0 ] = xcalloc( table
->n
<< 1, sizeof table
->t
[ 0 ][ 0 ] );
51 table
->t
[ 1 ] = table
->t
[ 0 ] + table
->n
;
52 table
->values
= xmalloc( table
->n
* sizeof *table
->values
);
56 extern void table_destroy( struct window_table
*table
) {
58 free( table
->t
[ 0 ] );
59 free( table
->values
);
/* table_hash: map (seed, key) to a bucket index in half-table 0 or 1,
   with two implementations selected by preprocessor tests for the
   availability of a 64-bit integer type.
   NOTE(review): heavily truncated by extraction -- the final (key)
   parameter, most of the body, and the matching #else/#endif lines are
   missing, and the leading integers are fused original line numbers.
   Do not modify without the pristine source. */
63 static CONST
int table_hash( unsigned long hash
, int n
, int half
,
67 #if defined( UINT64_MAX ) || defined( uint64_t )
74 h0
= hash
^ 0x79D9C6D4UL
;
75 h1
= hash
* 0x6A391E85UL
;
78 h1
= hash
^ 0x52C694B6UL
;
81 #if defined( UINT64_MAX ) || defined( uint64_t ) || ULONG_MAX > 0xFFFFFFFFUL
98 static PURE
int table_lookup_index( struct window_table
*table
,
105 h
= table_hash( table
->hash
, table
->n
, 0, key
);
106 if( table
->t
[ 0 ][ h
].key
== key
)
107 return table
->t
[ 0 ][ h
].index
;
109 h
= table_hash( table
->hash
, table
->n
, 1, key
);
110 if( table
->t
[ 1 ][ h
].key
== key
)
111 return table
->t
[ 1 ][ h
].index
;
116 extern PURE
struct gwm_window
*table_lookup( struct window_table
*table
,
124 return ( index
= table_lookup_index( table
, key
) ) >= 0 ?
125 table
->values
[ index
] : NULL
;
128 static void table_insert_index( struct window_table
*table
, xcb_window_t key
,
/* table_rehash: resize the table to n buckets per half and/or re-seed
   with a new hash, re-inserting every occupied entry from both old
   half-tables via table_insert_index.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 133, 136-137, 140-145 (presumably where
   hash, n and max are installed into *table -- TODO confirm), 149, and
   160-162 (presumably free( old_t[ 0 ] ) and the closing brace -- TODO
   confirm) are missing. */
131 static void table_rehash( struct window_table
*table
, unsigned long hash
,
132 unsigned long n
, unsigned long max
) {
134 int old_n
= table
->n
;
135 struct half_table
*old_t
[ 2 ];
138 old_t
[ 0 ] = table
->t
[ 0 ];
139 old_t
[ 1 ] = table
->t
[ 1 ];
146 table
->t
[ 0 ] = xcalloc( table
->n
<< 1, sizeof table
->t
[ 0 ][ 0 ] );
147 table
->t
[ 1 ] = table
->t
[ 0 ] + table
->n
;
148 table
->values
= xrealloc( table
->values
, table
->n
* sizeof *table
->values
);
150 /* We've now gotten everything we want out of the old table. Although
151 table_insert might possibly cause inefficient re-entrancy, that case
152 should be both harmless and exceedingly rare. */
153 for( i
= 0; i
< old_n
; i
++ ) {
154 if( old_t
[ 0 ][ i
].key
)
155 table_insert_index( table
, old_t
[ 0 ][ i
].key
,
156 old_t
[ 0 ][ i
].index
);
157 if( old_t
[ 1 ][ i
].key
)
158 table_insert_index( table
, old_t
[ 1 ][ i
].key
,
159 old_t
[ 1 ][ i
].index
);
/* table_insert_index: cuckoo insertion of (key, index).  Alternates
   between the two half-tables (i & 1), writing the new entry into its
   hash slot and carrying away any evicted occupant, for at most
   table->max displacement steps; if the loop exhausts, re-seed the hash
   (+ 0x5DB1BA96UL) and rehash the whole table at the same size.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 166-172, 177, 180-187 (presumably the
   early return when the slot was empty and the swap making the evicted
   key/index the new insertion candidate -- TODO confirm) and 191 are
   missing. */
165 static void table_insert_index( struct window_table
*table
, xcb_window_t key
,
173 for( i
= 0; i
< table
->max
; i
++ ) {
174 int h
= table_hash( table
->hash
, table
->n
, i
& 1, key
);
175 xcb_window_t old_key
= table
->t
[ i
& 1 ][ h
].key
;
176 int old_index
= table
->t
[ i
& 1 ][ h
].index
;
178 table
->t
[ i
& 1 ][ h
].key
= key
;
179 table
->t
[ i
& 1 ][ h
].index
= index
;
188 /* Failure to insert: rehash. */
189 table_rehash( table
, table
->hash
+ 0x5DB1BA96UL
,
190 table
->n
, table
->max
);
/* table_insert: public insertion of a (key, value) pair.  Asserts the
   key is new, stores value at slot table->used in the values array,
   doubles the table (and adds 4 to max) when load exceeds 3/4 of n,
   then indexes the key via table_insert_index.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 196-199, 201, 203, 207 and 209 are
   missing (table->used is presumably incremented in one of them --
   TODO confirm ordering against pristine source). */
194 static void table_insert( struct window_table
*table
, xcb_window_t key
,
195 struct gwm_window
*value
) {
200 assert( !table_lookup( table
, key
) );
202 table
->values
[ table
->used
] = value
;
204 if( table
->used
> ( ( table
->n
* 3 ) >> 2 ) )
205 /* Load becoming high: resize. */
206 table_rehash( table
, table
->hash
, table
->n
<< 1, table
->max
+ 4 );
208 table_insert_index( table
, key
, table
->used
);
/* table_delete: remove key from the table.  Probes both cuckoo
   positions; on a match, if the deleted entry's index is below the last
   used values slot, the last value is moved into the vacated index (and
   whichever half-table references it has its .index patched) so the
   values array stays dense.  The hash slot is then cleared (.key = 0),
   and the table shrinks by half when load falls below 3/16 and n > 32.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 212-218, 222, 226-228, 231, 235, 237,
   240, 242, 244, 246-248, 250 and 254 onward are missing (locals i/h/
   index, used decrement, continuation arguments of several calls, and
   the closing braces -- TODO confirm against pristine source). */
211 static void table_delete( struct window_table
*table
, xcb_window_t key
) {
219 for( i
= 0; i
< 2; i
++ ) {
220 h
= table_hash( table
->hash
, table
->n
, i
, key
);
221 if( table
->t
[ i
][ h
].key
== key
) {
223 xcb_window_t replacement_key
;
224 struct gwm_window
*replacement_value
;
225 int replacement_hash
;
229 if( ( index
= table
->t
[ i
][ h
].index
) < table
->used
) {
230 /* Must replace our old index, to keep the values array
232 table
->values
[ index
] = replacement_value
=
233 table
->values
[ table
->used
];
234 replacement_key
= replacement_value
->w
;
236 replacement_hash
= table_hash( table
->hash
, table
->n
, 0,
238 if( table
->t
[ 0 ][ replacement_hash
].key
== replacement_key
)
239 table
->t
[ 0 ][ replacement_hash
].index
= index
;
241 replacement_hash
= table_hash( table
->hash
, table
->n
, 1,
243 assert( table
->t
[ 1 ][ replacement_hash
].key
==
245 table
->t
[ 1 ][ replacement_hash
].index
= index
;
249 table
->t
[ i
][ h
].key
= 0;
251 if( table
->used
< ( ( table
->n
* 3 ) >> 4 ) && table
->n
> 32 )
252 /* Load becoming low: resize. */
253 table_rehash( table
, table
->hash
, table
->n
>> 1,
/* queue_window_update: merge the rectangle (x, y, width, height) into
   window->update, the bounding box of pending repaint work, and make
   sure the window is registered in update_windows.  A previously empty
   box is simply replaced; otherwise the box is grown edge by edge, and
   the cleared flag is reset whenever the box changes (an enlarged box
   may cover areas not guaranteed to be cleared).
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 262-263, 265, 272-273, 275, 285-286,
   291-292, 296-297, 301-303, 305-306 and 308-309 are missing (the
   trailing parameter(s), the else keyword joining the two branches,
   several closing braces, and presumably an early return when the
   window is already queued -- TODO confirm; note the visible lookup at
   original line 304 is followed directly by the insert at 307). */
260 extern void queue_window_update( struct gwm_window
*window
,
261 int x
, int y
, int width
, int height
,
264 /* FIXME Consider computing the region instead of the bounding box. */
266 if( !window
->update
.width
|| !window
->update
.height
) {
267 /* New update region. */
268 window
->update
.x
= x
;
269 window
->update
.y
= y
;
270 window
->update
.width
= width
;
271 window
->update
.height
= height
;
274 window
->cleared
= FALSE
;
276 /* Compute the bounding box of the union of the old and new regions.
277 If the bounding box is enlarged, we reset the cleared flag,
278 because it is very likely that it now includes areas which
279 were not in either of the two original regions (and therefore
280 not guaranteed to be cleared). */
281 if( x
< window
->update
.x
) {
282 window
->update
.width
+= window
->update
.x
- x
;
283 window
->update
.x
= x
;
284 window
->cleared
= FALSE
;
287 if( y
< window
->update
.y
) {
288 window
->update
.height
+= window
->update
.y
- y
;
289 window
->update
.y
= y
;
290 window
->cleared
= FALSE
;
293 if( x
+ width
> window
->update
.x
+ window
->update
.width
) {
294 window
->update
.width
= x
+ width
- window
->update
.x
;
295 window
->cleared
= FALSE
;
298 if( y
+ height
> window
->update
.y
+ window
->update
.height
) {
299 window
->update
.height
= y
+ height
- window
->update
.y
;
300 window
->cleared
= FALSE
;
304 if( table_lookup( &update_windows
, window
->w
) )
307 table_insert( &update_windows
, window
->w
, window
);
310 extern void window_update_done( struct gwm_window
*window
) {
312 window
->update
.x
= 0;
313 window
->update
.y
= 0;
314 window
->update
.width
= 0;
315 window
->update
.height
= 0;
316 window
->cleared
= TRUE
;
318 table_delete( &update_windows
, window
->w
);
/* add_window: allocate and register a gwm_window for X window w,
   returning NULL if w is already known.  The new window starts with an
   empty update region (window_update_done) and is entered into the
   global windows table keyed by window->w.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 322, 324, 327, 329 (presumably
   window->w = w, required before the table_insert below -- TODO
   confirm), 331 and 333-336 (presumably further field initialisation,
   return window, and the closing brace -- TODO confirm) are missing. */
321 extern MALLOC
struct gwm_window
*add_window( xcb_window_t w
) {
323 struct gwm_window
*window
;
325 if( table_lookup( &windows
, w
) )
326 return NULL
; /* window already exists */
328 window
= xmalloc( sizeof *window
);
330 window_update_done( window
);
332 table_insert( &windows
, window
->w
, window
);
/* forget_window: drop all record of a window -- clear pointer_demux if
   it referred to this window, and remove it from both the windows and
   update_windows tables.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 338, 341 and 344 onward are missing
   (presumably blank lines, possibly a free of the window structure, and
   the closing brace -- TODO confirm ownership against pristine source). */
337 extern void forget_window( struct gwm_window
*window
) {
339 if( pointer_demux
== window
->w
)
340 pointer_demux
= XCB_NONE
;
342 table_delete( &windows
, window
->w
);
343 table_delete( &update_windows
, window
->w
);
/* lookup_window: find the gwm_window for X window w; if the record is
   still WINDOW_INCOMPLETE (its asynchronous setup has not finished),
   block via sync_with_callback on the pending request sequence before
   continuing.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 348, 350-351, 353 and 359 onward are
   missing (presumably the return of window -- and possibly a repeated
   lookup after the sync -- plus the closing brace; TODO confirm). */
347 extern struct gwm_window
*lookup_window( xcb_window_t w
) {
349 struct gwm_window
*window
;
352 window
= table_lookup( &windows
, w
);
354 if( window
&& window
->type
== WINDOW_INCOMPLETE
)
355 /* Whoops... all this asynchronicity has gotten us ahead
356 of ourselves. Catch up on what we deferred, by waiting
357 until the window is complete before we continue. */
358 sync_with_callback( window
->u
.incomplete
.sequence
);
364 struct window_stack window_stack
;
/* stack_init: seed the hash and allocate both cuckoo half-tables of the
   window stack in one zeroed block (t[ 1 ] points into the second half
   of the t[ 0 ] allocation), mirroring table_init above.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 367-370, 372 and 375-377 are missing
   (presumably the initialisation of stack->n/used/max and the closing
   brace -- TODO confirm against pristine source). */
366 extern INIT
void stack_init( struct window_stack
*stack
) {
371 stack
->hash
= 0xBA97D5B1UL
;
373 stack
->t
[ 0 ] = xcalloc( stack
->n
<< 1, sizeof stack
->t
[ 0 ][ 0 ] );
374 stack
->t
[ 1 ] = stack
->t
[ 0 ] + stack
->n
;
378 extern void stack_destroy( struct window_stack
*stack
) {
380 free( stack
->t
[ 0 ] );
384 extern PURE
struct stacking_order
*stack_lookup( struct window_stack
*stack
,
392 h
= table_hash( stack
->hash
, stack
->n
, 0, key
);
393 if( stack
->t
[ 0 ][ h
].window
== key
)
394 return &stack
->t
[ 0 ][ h
];
396 h
= table_hash( stack
->hash
, stack
->n
, 1, key
);
397 if( stack
->t
[ 1 ][ h
].window
== key
)
398 return &stack
->t
[ 1 ][ h
];
403 static void stack_insert( struct window_stack
*stack
, xcb_window_t window
,
404 xcb_window_t lower_window
,
405 xcb_window_t higher_window
);
/* stack_rehash: resize and/or re-seed the stacking-order table,
   re-inserting every occupied entry (window plus its lower/higher
   neighbour links) from both old half-tables, mirroring table_rehash.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 409, 412-413, 416-421 (presumably where
   hash, n and max are installed into *stack -- TODO confirm), 424 and
   437-439 (presumably free( old_t[ 0 ] ) and the closing brace -- TODO
   confirm) are missing. */
407 static void stack_rehash( struct window_stack
*stack
, unsigned long hash
,
408 unsigned long n
, unsigned long max
) {
410 int old_n
= stack
->n
;
411 struct stacking_order
*old_t
[ 2 ];
414 old_t
[ 0 ] = stack
->t
[ 0 ];
415 old_t
[ 1 ] = stack
->t
[ 1 ];
422 stack
->t
[ 0 ] = xcalloc( stack
->n
<< 1, sizeof stack
->t
[ 0 ][ 0 ] );
423 stack
->t
[ 1 ] = stack
->t
[ 0 ] + stack
->n
;
425 /* We've now gotten everything we want out of the old stack. Although
426 stack_insert might possibly cause inefficient re-entrancy, that case
427 should be both harmless and exceedingly rare. */
428 for( i
= 0; i
< old_n
; i
++ ) {
429 if( old_t
[ 0 ][ i
].window
)
430 stack_insert( stack
, old_t
[ 0 ][ i
].window
,
431 old_t
[ 0 ][ i
].lower_window
,
432 old_t
[ 0 ][ i
].higher_window
);
433 if( old_t
[ 1 ][ i
].window
)
434 stack_insert( stack
, old_t
[ 1 ][ i
].window
,
435 old_t
[ 1 ][ i
].lower_window
,
436 old_t
[ 1 ][ i
].higher_window
);
/* stack_insert: cuckoo insertion of a stacking entry (window and its
   lower/higher neighbour links).  Alternates between the two half-tables
   (i & 1), writing the new entry and carrying away any evicted occupant,
   for at most stack->max displacement steps; on exhaustion, re-seed the
   hash (+ 0x5DB1BA96UL) and rehash at the same size.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 445-450, 455-456, 459-460, 464-468
   (presumably the early return when the slot was empty and the swap
   making the evicted entry the new insertion candidate -- visible lines
   469-470 carry old_lower/old_higher forward, so window = old_window
   likely sits in the gap -- TODO confirm) and 471-472 are missing. */
442 static void stack_insert( struct window_stack
*stack
, xcb_window_t window
,
443 xcb_window_t lower_window
,
444 xcb_window_t higher_window
) {
451 for( i
= 0; i
< stack
->max
; i
++ ) {
452 int h
= table_hash( stack
->hash
, stack
->n
, i
& 1, window
);
453 xcb_window_t old_window
= stack
->t
[ i
& 1 ][ h
].window
;
454 xcb_window_t old_lower
, old_higher
;
457 old_lower
= stack
->t
[ i
& 1 ][ h
].lower_window
;
458 old_higher
= stack
->t
[ i
& 1 ][ h
].higher_window
;
461 stack
->t
[ i
& 1 ][ h
].window
= window
;
462 stack
->t
[ i
& 1 ][ h
].lower_window
= lower_window
;
463 stack
->t
[ i
& 1 ][ h
].higher_window
= higher_window
;
469 lower_window
= old_lower
;
470 higher_window
= old_higher
;
473 /* Failure to insert: rehash. */
474 stack_rehash( stack
, stack
->hash
+ 0x5DB1BA96UL
,
475 stack
->n
, stack
->max
);
/* stack_insert_new: insert a brand-new stacking entry.  Asserts the
   window is not already present, doubles the table (and adds 4 to max)
   when load exceeds 3/4 of n, then performs the cuckoo insertion.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 482-485, 487, 491 and 493 are missing
   (stack->used is presumably incremented in one of them, plus the
   closing brace -- TODO confirm against pristine source). */
479 static void stack_insert_new( struct window_stack
*stack
, xcb_window_t window
,
480 xcb_window_t lower_window
,
481 xcb_window_t higher_window
) {
486 assert( !stack_lookup( stack
, window
) );
488 if( stack
->used
> ( ( stack
->n
* 3 ) >> 2 ) )
489 /* Load becoming high: resize. */
490 stack_rehash( stack
, stack
->hash
, stack
->n
<< 1, stack
->max
+ 4 );
492 stack_insert( stack
, window
, lower_window
, higher_window
);
495 extern void stack_insert_singleton( struct window_stack
*stack
,
501 stack_insert_new( stack
, key
, key
, key
);
503 assert( stack_lookup( stack
, key
) );
506 extern void stack_insert_above( struct window_stack
*stack
,
507 xcb_window_t key
, xcb_window_t lower_window
) {
509 struct stacking_order
*l
, *h
;
510 xcb_window_t higher_window
;
515 l
= stack_lookup( stack
, lower_window
);
517 higher_window
= l
->higher_window
;
519 stack_insert_new( stack
, key
, lower_window
, higher_window
);
520 /* It's essential to repeat the lookup of lower_window, because inserting
521 the new entry might have disturbed the existing ones. */
522 l
= stack_lookup( stack
, lower_window
);
523 h
= stack_lookup( stack
, higher_window
);
524 h
->lower_window
= key
;
525 l
->higher_window
= key
;
527 assert( stack_lookup( stack
, key
) );
/* stack_remove: unlink window from the circular stacking order (its
   lower and higher neighbours are pointed at each other), then clear its
   hash slot in whichever half-table holds it, shrinking the table by
   half when load falls below 3/16 and n > 32.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 533, 535-539 (locals i and hash), 541-
   542, 544-545, 547-548, 551, 556, 560-561 (the trailing argument of
   the rehash call, presumably stack->max - 4 -- TODO confirm) and 563
   onward are missing, including loop/function closing braces. */
532 extern void stack_remove( struct window_stack
*stack
, xcb_window_t window
) {
534 struct stacking_order
*l
, *h
, *entry
;
540 entry
= stack_lookup( stack
, window
);
543 l
= stack_lookup( stack
, entry
->lower_window
);
546 h
= stack_lookup( stack
, entry
->higher_window
);
549 l
->higher_window
= entry
->higher_window
;
550 h
->lower_window
= entry
->lower_window
;
552 for( i
= 0; i
< 2; i
++ ) {
553 hash
= table_hash( stack
->hash
, stack
->n
, i
, window
);
554 if( stack
->t
[ i
][ hash
].window
== window
) {
555 stack
->t
[ i
][ hash
].window
= 0;
557 if( stack
->used
< ( ( stack
->n
* 3 ) >> 4 ) && stack
->n
> 32 )
558 /* Load becoming low: resize. */
559 stack_rehash( stack
, stack
->hash
, stack
->n
>> 1,
562 assert( !stack_lookup( stack
, window
) );
/* stack_move_above: reposition window in the stacking order so it sits
   immediately above lower_window.  No-ops when window is null or equal
   to lower_window, or when it is already directly above lower_window;
   otherwise unlinks the entry from its current neighbours and splices it
   in above lower_window, updating all four affected links.
   NOTE(review): extraction damage -- leading integers are fused original
   line numbers; original lines 572 (the first parameter line, presumably
   xcb_window_t window -- TODO confirm), 574, 576, 578 and 584-585
   (presumably the return statements for the two guard conditions --
   TODO confirm), 579, 581-582, 587-588, 590-591, 594, 596-597, 599-600,
   605 and 607-608 are missing. */
571 extern void stack_move_above( struct window_stack
*stack
,
573 xcb_window_t lower_window
) {
575 struct stacking_order
*l
, *h
, *entry
;
577 if( !window
|| window
== lower_window
)
580 entry
= stack_lookup( stack
, window
);
583 if( entry
->lower_window
== lower_window
)
586 l
= stack_lookup( stack
, entry
->lower_window
);
589 h
= stack_lookup( stack
, entry
->higher_window
);
592 l
->higher_window
= entry
->higher_window
;
593 h
->lower_window
= entry
->lower_window
;
595 l
= stack_lookup( stack
, lower_window
);
598 h
= stack_lookup( stack
, l
->higher_window
);
601 h
->lower_window
= window
;
602 l
->higher_window
= window
;
603 entry
->lower_window
= lower_window
;
604 entry
->higher_window
= h
->window
;
606 assert( stack_lookup( stack
, window
) );
609 extern xcb_window_t
stack_real_below( struct window_stack
*stack
,
610 xcb_window_t window
) {
617 for( search
= stack_lookup( stack
, window
)->lower_window
;
618 search
!= window
&& ( search
& STACK_MASK
) != STACK_END
;
619 search
= stack_lookup( stack
, search
)->lower_window
)
620 if( !( search
& STACK_MASK
) )