1 /***************************************************************************
2 * Copyright (C) 2007 by www.databasecache.com *
3 * Contact: praba_tuty@databasecache.com *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
15 ***************************************************************************/
17 #include<CatalogTables.h>
23 #include<PredicateImpl.h>
25 /* Defines `hashpjw' function by P.J. Weinberger
26 [see Aho/Sethi/Ullman, COMPILERS: Principles, Techniques and Tools,
29 unsigned int hashString(char *strVal
)
37 hval
+= (unsigned int) *str
++;
38 g
= hval
& ((unsigned int) 0xf << (32 - 4));
41 hval
^= g
>> (32 - 8);
48 unsigned int hashBinary(char *strVal
, int length
)
54 while (iter
!= length
)
57 hval
+= (unsigned int) *str
++;
58 g
= hval
& ((unsigned int) 0xf << (32 - 4));
61 hval
^= g
>> (32 - 8);
69 unsigned int HashIndex::computeHashBucket(DataType type
, void *key
, int noOfBuckets
, int length
)
72 if (typeInt
== type
) {
74 return val
% noOfBuckets
;
75 }else if (typeString
== type
) {
76 unsigned int val
= hashString((char*)key
);
77 return val
% noOfBuckets
;
78 }else if (typeShort
== type
) {
79 short val
= *(short*) key
;
80 return val
% noOfBuckets
;
81 }else if (typeLong
== type
) {
82 long val
= *(long*) key
;
83 return val
% noOfBuckets
;
84 }else if (typeLongLong
== type
) {
85 long long val
= *(long long*) key
;
86 return val
% noOfBuckets
;
87 }else if (typeByteInt
== type
) {
88 ByteInt val
= *(ByteInt
*)key
;
89 return val
% noOfBuckets
;
90 }else if (typeDate
== type
) {
92 return val
% noOfBuckets
;
93 }else if (typeTime
== type
) {
95 return val
% noOfBuckets
;
96 }else if (typeComposite
== type
) {
97 unsigned int val
= hashBinary((char*)key
, length
);
98 return val
% noOfBuckets
;
99 }else if (typeBinary
== type
) {
100 unsigned int val
= hashBinary((char*)key
, length
);
101 return val
% noOfBuckets
;
102 }else if (typeULong
== type
) {
103 unsigned long val
= *(unsigned long*)key
;
104 return val
% noOfBuckets
;
106 printError(ErrSysFatal
,"Type not supported for hashing\n");
// HashIndex::insert
// Adds a hash-index node for `tuple`: builds a flattened key buffer for
// composite keys, hashes the key to pick a bucket, takes that bucket's
// mutex, optionally enforces uniqueness by scanning the chain, links the
// node, and appends an InsertHashIndexOperation logical undo record
// (presumably guarded by undoFlag — the guard line is not visible here).
// NOTE(review): this region of the file is line-mangled and several
// original lines (braces, declarations of rv/rc/bucketNo/node/bucketTuple/
// res, if-conditions) are missing from this view; the code text below is
// kept byte-identical.
110 DbRetVal
HashIndex::insert(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool undoFlag
)
112 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
113 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
115 int noOfBuckets
= info
->noOfBuckets
;
116 int offset
= info
->fldOffset
;
117 DataType type
= info
->type
;
// Flatten all indexed fields of the tuple into one malloc'd buffer
// (keyStartBuffer); used as the hash input for composite keys.
118 char *keyBuffer
= (char*) malloc(info
->compLength
);
119 memset(keyBuffer
, 0, info
->compLength
);
120 void *keyStartBuffer
= keyBuffer
, *keyPtr
;
121 FieldIterator iter
= info
->idxFldList
.getIterator();
122 while(iter
.hasElement())
124 FieldDef
*def
= iter
.nextElement();
125 keyPtr
= (char *)tuple
+ def
->offset_
;
126 AllDataType::copyVal(keyBuffer
, keyPtr
, def
->type_
, def
->length_
);
127 keyBuffer
= keyBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
130 printDebug(DM_HashIndex
, "Inserting hash index node for %s", iptr
->indName_
);
131 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
132 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
// Single-field keys hash the field in place inside the tuple;
// composite keys hash the flattened buffer built above.
133 keyPtr
=(void*)((char*)tuple
+ offset
);
135 if (type
== typeComposite
)
136 bucketNo
= computeHashBucket(type
, keyStartBuffer
, noOfBuckets
, info
->compLength
);
138 bucketNo
= computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
139 printDebug(DM_HashIndex
, "HashIndex insert bucketno %d", bucketNo
);
140 Bucket
*bucket
= &(buckets
[bucketNo
]);
// Undo-log record describing this insert, consumed by
// insertLogicalUndoLog/deleteLogicalUndoLog on rollback.
141 HashUndoLogInfo
*hInfo
= new HashUndoLogInfo();
142 hInfo
->metaData_
= tbl
->db_
->getMetaDataPtr();
143 hInfo
->bucket_
= bucket
;
144 hInfo
->tuple_
= tuple
;
145 hInfo
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
146 hInfo
->keyPtr_
= keyPtr
;
// Per-bucket mutex serializes chain mutation for this bucket only.
147 int ret
= bucket
->mutex_
.getLock(tbl
->db_
->procSlot
);
// Lock-timeout path.
// NOTE(review): keyStartBuffer is freed here but hInfo is not deleted —
// apparent leak on this path; confirm against the full file.
151 free(keyStartBuffer
);
152 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucketNo
);
153 return ErrLockTimeOut
;
155 HashIndexNode
*head
= (HashIndexNode
*) bucket
->bucketList_
;
// Unique-index enforcement: walk the existing chain and compare the new
// key against every node's tuple.
156 if (head
&& info
->isUnique
)
158 BucketList
list(head
);
159 BucketIter iter
= list
.getIterator();
162 printDebug(DM_HashIndex
, "HashIndex insert Checking for unique");
165 while((node
= iter
.next()) != NULL
)
167 bucketTuple
= node
->ptrToTuple_
;
// Composite keys compare field by field; other types compare the raw
// key bytes at the index offset.
168 if (type
== typeComposite
) {
169 FieldIterator fldIter
= info
->idxFldList
.getIterator();
171 while (fldIter
.hasElement()) {
172 FieldDef
*def
= fldIter
.nextElement();
173 res
= AllDataType::compareVal((char *)bucketTuple
+ def
->offset_
, (char *)tuple
+ def
->offset_
, OpEquals
, def
->type_
, def
->length_
);
177 else res
= AllDataType::compareVal((void*)((char*)bucketTuple
+offset
), (void*)((char*)tuple
+offset
), OpEquals
,type
, info
->compLength
);
// Duplicate found: release resources and fail with ErrUnique.
181 free(keyStartBuffer
);
182 printError(ErrUnique
, "Unique key violation");
183 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
189 printDebug(DM_HashIndex
, "HashIndex insert into bucket list");
// Empty bucket: allocate the first chain node directly from the index's
// hash-node chunk and make it the bucket head.
192 printDebug(DM_HashIndex
, "HashIndex insert head is empty");
194 HashIndexNode
*firstNode
= (HashIndexNode
*)(((Chunk
*)iptr
->hashNodeChunk_
)->allocate(tbl
->db_
, &rv
));
195 if (firstNode
== NULL
)
197 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
199 free(keyStartBuffer
);
202 firstNode
->ptrToKey_
= keyPtr
;
203 firstNode
->ptrToTuple_
= tuple
;
204 firstNode
->next_
= NULL
;
205 bucket
->bucketList_
= (HashIndexNode
*)firstNode
;
206 printDebug(DM_HashIndex
, "HashIndex insert new node %x in empty bucket", bucket
->bucketList_
);
// Non-empty bucket: append via BucketList.
210 BucketList
list(head
);
211 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
213 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
215 free(keyStartBuffer
);
// Record the logical undo entry; on failure the chain insert is rolled
// back so index and undo log stay consistent.
221 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, hInfo
, sizeof(HashUndoLogInfo
));
224 BucketList
list(head
);
225 rc
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
226 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log insert followed by hash bucket list remove\n");
227 bucket
->bucketList_
= list
.getBucketListHead();
// Common cleanup: free the key buffer, drop the undo-info scratch object,
// release the bucket mutex.
230 free(keyStartBuffer
);
231 delete hInfo
; hInfo
= NULL
;
232 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
// HashIndex::remove
// Unlinks the hash-index node for `tuple`: rebuilds the (composite) key
// buffer, hashes to find the bucket, takes the bucket mutex, removes the
// node from the chain, and appends a DeleteHashIndexOperation logical undo
// record (presumably guarded by undoFlag — the guard line is not visible
// here). On undo-log failure the node is re-inserted.
// NOTE(review): this region is line-mangled; several original lines
// (braces, `int bucket`/`ret` checks, returns) are missing from this view;
// the code text below is kept byte-identical.
237 DbRetVal
HashIndex::remove(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool undoFlag
)
239 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
241 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
242 DataType type
= info
->type
;
243 int offset
= info
->fldOffset
;
244 int noOfBuckets
= info
->noOfBuckets
;
246 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
247 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
// Flatten indexed fields into a malloc'd buffer — same scheme as insert().
248 char *keyBuffer
= (char*) malloc(info
->compLength
);
249 memset(keyBuffer
, 0, info
->compLength
);
250 void *keyStartBuffer
= keyBuffer
, *keyPtr
;
251 FieldIterator iter
= info
->idxFldList
.getIterator();
252 while(iter
.hasElement())
254 FieldDef
*def
= iter
.nextElement();
255 keyPtr
= (char *)tuple
+ def
->offset_
;
256 AllDataType::copyVal(keyBuffer
, keyPtr
, def
->type_
, def
->length_
);
257 keyBuffer
= keyBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
// Select the bucket: composite keys hash the flattened buffer, other
// types hash the field in place inside the tuple.
260 keyPtr
=(void*)((char*)tuple
+ offset
);
262 if (type
== typeComposite
)
263 bucket
= HashIndex::computeHashBucket(type
, keyStartBuffer
, noOfBuckets
, info
->compLength
);
264 else bucket
= HashIndex::computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
266 Bucket
*bucket1
= &buckets
[bucket
];
// Undo-log record for this delete (used by rollback).
267 HashUndoLogInfo
*hInfo
= new HashUndoLogInfo();
268 hInfo
->metaData_
= tbl
->db_
->getMetaDataPtr();
269 hInfo
->bucket_
= bucket1
;
270 hInfo
->tuple_
= tuple
;
271 hInfo
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
272 hInfo
->keyPtr_
= keyPtr
;
274 int ret
= bucket1
->mutex_
.getLock(tbl
->db_
->procSlot
);
// Lock-timeout path.
// NOTE(review): hInfo is not deleted here — apparent leak; confirm
// against the full file.
278 free(keyStartBuffer
);
279 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucket
);
280 return ErrLockTimeOut
;
282 HashIndexNode
*head
= (HashIndexNode
*) bucket1
->bucketList_
;
// Empty chain means the index entry we were asked to remove is missing —
// flagged as a should-never-happen internal error.
284 if (!head
) { printError(ErrNotExists
, "Hash index does not exist:should never happen\n");
285 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
287 free(keyStartBuffer
);
290 BucketList
list(head
);
291 printDebug(DM_HashIndex
, "Removing hash index node from head %x", head
);
293 DbRetVal rc
= list
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
296 printDebug(DM_HashIndex
, "Removing hash index node from head ");
297 bucket1
->bucketList_
= list
.getBucketListHead();
// Record the logical undo entry; if that fails, re-insert the node so
// index and undo log stay consistent.
301 rc
=tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, hInfo
, sizeof(HashUndoLogInfo
));
304 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
305 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
306 bucket1
->bucketList_
= list
.getBucketListHead();
// Common cleanup.
309 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
311 free(keyStartBuffer
);
// HashIndex::update
// Re-homes a tuple's hash-index node after an indexed field changed:
// builds the OLD key from the tuple and the NEW key from the bound values,
// compares them (early-out when unchanged), then under BOTH bucket mutexes
// removes the node from the old bucket and inserts it into the new one,
// logging DeleteHashIndexOperation + InsertHashIndexOperation undo records
// and rolling everything back if the second append fails.
// NOTE(review): this region is line-mangled; many original lines (braces,
// declarations of result/rc/rv/bucketNo/node, if-conditions, returns) are
// missing from this view; the code text below is kept byte-identical.
315 DbRetVal
HashIndex::update(TableImpl
*tbl
, Transaction
*tr
, void *indexPtr
, IndexInfo
*indInfo
, void *tuple
, bool undoFlag
)
317 CINDEX
*iptr
= (CINDEX
*)indexPtr
;
319 HashIndexInfo
*info
= (HashIndexInfo
*) indInfo
;
320 DataType type
= info
->type
;
321 int offset
= info
->fldOffset
;
322 int noOfBuckets
= info
->noOfBuckets
;
324 //check whether the index key is updated or not
325 //if it is not updated return from here
326 void *keyPtr
=(void*)((char*)tuple
+ offset
);
327 char *kPtr
= (char*)keyPtr
;
329 //creating old key value buffer for composite primary keys
330 char *oldKeyBuffer
= (char*) malloc(info
->compLength
);
331 memset(oldKeyBuffer
, 0, info
->compLength
);
332 void *oldKeyStartBuffer
= oldKeyBuffer
;
333 FieldIterator iter
= info
->idxFldList
.getIterator();
334 while(iter
.hasElement()) {
335 FieldDef
*def
= iter
.nextElement();
336 keyPtr
= (char *)tuple
+ def
->offset_
;
337 AllDataType::copyVal(oldKeyBuffer
, keyPtr
, def
->type_
, def
->length_
);
338 oldKeyBuffer
= oldKeyBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
341 keyPtr
= (void *) kPtr
;
342 //Iterate through the bind list and check
343 FieldIterator idxFldIter
= info
->idxFldList
.getIterator();
// Build the NEW key image from the statement's bound values; binary keys
// get a double-width staging buffer before conversion.
344 char *keyBindBuffer
;
345 if(type
==typeBinary
) {
346 keyBindBuffer
= (char*) malloc(2 * info
->compLength
);
347 memset(keyBindBuffer
, 0, 2 * info
->compLength
);
349 keyBindBuffer
= (char*) malloc(info
->compLength
);
350 memset(keyBindBuffer
, 0, info
->compLength
);
352 void *keyStartBuffer
= (void*) keyBindBuffer
;
353 bool keyUpdated
= false;
// For every indexed field, find its FieldDef in the table's field list by
// name; a non-NULL bindVal_ means the statement supplies a new value,
// otherwise the current tuple value is copied.
355 while (idxFldIter
.hasElement()) {
356 FieldDef
*idef
= idxFldIter
.nextElement();
357 FieldIterator fldIter
= tbl
->fldList_
.getIterator();
358 while (fldIter
.hasElement()) {
359 FieldDef
*def
= fldIter
.nextElement();
360 if (0 == strcmp(def
->fldName_
, idef
->fldName_
)) {
361 if (NULL
!= def
->bindVal_
) {
362 if(type
==typeBinary
) {
363 AllDataType::copyVal(keyBindBuffer
, def
->bindVal_
,
364 def
->type_
, 2*def
->length_
);
// NOTE(review): keyStartBuffer is reassigned to a fresh calloc here —
// the original malloc'd buffer appears to leak on this path; confirm
// against the full file.
365 keyStartBuffer
=calloc(1,info
->compLength
);
366 AllDataType::convertToBinary(keyStartBuffer
, keyBindBuffer
, typeString
, info
->compLength
);
369 AllDataType::copyVal(keyBindBuffer
, def
->bindVal_
,
370 def
->type_
, def
->length_
);
371 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
374 AllDataType::copyVal(keyBindBuffer
, (char *) tuple
+ def
->offset_
, def
->type_
, def
->length_
);
375 keyBindBuffer
= keyBindBuffer
+ AllDataType::size(def
->type_
, def
->length_
);
// Early-out: no indexed field was bound, so the key cannot have changed.
383 //printf("PRABA::key not updated\n");
384 free(keyStartBuffer
);
385 free(oldKeyStartBuffer
);
388 //printf("PRABA::it is wrong coming here\n");
// Compare old and new key images; equal keys mean no re-homing needed.
390 if (type
== typeComposite
)
391 result
= AllDataType::compareVal(oldKeyStartBuffer
, keyStartBuffer
,
392 OpEquals
, info
->type
, info
->compLength
);
393 else result
= AllDataType::compareVal(keyPtr
, keyStartBuffer
,
394 OpEquals
, info
->type
, info
->compLength
);
396 free(keyStartBuffer
);
397 free(oldKeyStartBuffer
);
400 printDebug(DM_HashIndex
, "Updating hash index node: Key value is updated");
402 ChunkIterator citer
= CatalogTableINDEX::getIterator(indexPtr
);
404 Bucket
* buckets
= (Bucket
*)citer
.nextElement();
406 //remove the node whose key is updated
408 if (type
== typeComposite
)
409 bucketNo
= computeHashBucket(type
, oldKeyStartBuffer
, noOfBuckets
, info
->compLength
);
410 else bucketNo
= computeHashBucket(type
, keyPtr
, noOfBuckets
, info
->compLength
);
411 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for old value is %d", bucketNo
);
412 Bucket
*bucket
= &buckets
[bucketNo
];
// Undo record for the delete half of the move.
// NOTE(review): hChunk_ is set to indexPtr here, while hInfo2 below uses
// ((CINDEX*)indexPtr)->hashNodeChunk_ — inconsistent; confirm which the
// undo handlers expect.
414 HashUndoLogInfo
*hInfo1
= new HashUndoLogInfo();
415 hInfo1
->metaData_
= tbl
->db_
->getMetaDataPtr();
416 hInfo1
->bucket_
= bucket
;
417 hInfo1
->tuple_
= tuple
;
418 hInfo1
->hChunk_
= indexPtr
;
419 hInfo1
->keyPtr_
= keyPtr
;
421 //it may run into deadlock, when two threads updates tuples which falls in
422 //same buckets.So take both the mutex one after another, which will reduce the
424 int ret
= bucket
->mutex_
.getLock(tbl
->db_
->procSlot
);
// Lock-timeout path for the OLD bucket.
// NOTE(review): hInfo1 is not deleted here — apparent leak; confirm
// against the full file.
428 free(keyStartBuffer
);
429 free(oldKeyStartBuffer
);
430 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",bucketNo
);
431 return ErrLockTimeOut
;
433 //insert node for the updated key value
434 int newBucketNo
= computeHashBucket(type
,
435 keyStartBuffer
, noOfBuckets
, info
->compLength
);
436 printDebug(DM_HashIndex
, "Updating hash index node: Bucket for new value is %d", newBucketNo
);
438 Bucket
*bucket1
= &buckets
[newBucketNo
];
// Undo record for the insert half of the move.
// NOTE(review): bucket_ is set to `bucket` (the OLD bucket), not bucket1 —
// looks suspicious; confirm intended behavior.
439 HashUndoLogInfo
*hInfo2
= new HashUndoLogInfo();
440 hInfo2
->metaData_
= tbl
->db_
->getMetaDataPtr();
441 hInfo2
->bucket_
= bucket
;
442 hInfo2
->tuple_
= tuple
;
443 hInfo2
->hChunk_
= ((CINDEX
*)indexPtr
)->hashNodeChunk_
;
444 hInfo2
->keyPtr_
= keyPtr
;
// NOTE(review): this getLock return value is not checked in the visible
// text (the first one assigns to `ret`); the error block below suggests a
// dropped `if` line — confirm against the full file.
445 bucket1
->mutex_
.getLock(tbl
->db_
->procSlot
);
// Lock-timeout path for the NEW bucket.
450 free(keyStartBuffer
);
451 free(oldKeyStartBuffer
);
452 printError(ErrLockTimeOut
,"Unable to acquire bucket Mutex for bucket %d",newBucketNo
);
453 return ErrLockTimeOut
;
// Remove the node from the OLD bucket's chain.
456 HashIndexNode
*head1
= (HashIndexNode
*) bucket
->bucketList_
;
459 BucketList
list1(head1
);
460 printDebug(DM_HashIndex
, "Updating hash index node: Removing node from list with head %x", head1
);
461 list1
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
462 bucket
->bucketList_
=list1
.getBucketListHead();
// Old chain empty: internal error — release both locks and bail out.
466 printError(ErrSysInternal
,"Update: Bucket list is null");
467 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
468 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
471 free(keyStartBuffer
);
472 free(oldKeyStartBuffer
);
473 return ErrSysInternal
;
// Log the delete half; if that append fails, restore the old chain state.
477 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, DeleteHashIndexOperation
, hInfo1
, sizeof(HashUndoLogInfo
));
480 BucketList
list((HashIndexNode
*) bucket
->bucketList_
);
481 rc
= list
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
482 if (rc
!=OK
) printError(ErrSysFatal
, "double failure on undo log remove followed by hash bucket list insert\n");
483 bucket
->bucketList_
= list
.getBucketListHead();
484 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
485 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
488 free(keyStartBuffer
);
489 free(oldKeyStartBuffer
);
// Insert the node into the NEW bucket's chain.
493 HashIndexNode
*head2
= (HashIndexNode
*) bucket1
->bucketList_
;
494 //Note:: the tuple will be in the same address location
495 //so not changing the keyptr and tuple during append
496 //only bucket where this node resides will only change
497 //if the index key is updated.
// Empty new bucket: allocate the first chain node directly.
501 HashIndexNode
*firstNode
= (HashIndexNode
*)(((Chunk
*)iptr
->hashNodeChunk_
)->allocate(tbl
->db_
, &rv
));
502 if (firstNode
== NULL
)
504 printError(rv
, "Error in allocating hash node");
505 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
506 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
509 free(keyStartBuffer
);
510 free(oldKeyStartBuffer
);
513 firstNode
->ptrToKey_
= keyPtr
;
514 firstNode
->ptrToTuple_
= tuple
;
515 firstNode
->next_
= NULL
;
516 bucket1
->bucketList_
= (HashIndexNode
*)firstNode
;
517 printDebug(DM_HashIndex
, "Updating hash index node: Adding new node %x:Head is empty", firstNode
);
// Non-empty new bucket: append via BucketList.
521 BucketList
list2(head2
);
522 printDebug(DM_HashIndex
, "Updating hash index node: Adding node to list with head %x", head2
);
523 list2
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
524 bucket1
->bucketList_
= list2
.getBucketListHead();
// Log the insert half; if that append fails, revert the whole move and
// drop the already-logged delete record.
528 rc
= tr
->appendLogicalHashUndoLog(tbl
->sysDB_
, InsertHashIndexOperation
, hInfo2
, sizeof(HashUndoLogInfo
));
531 //reverting back the changes:delete new node and add the old
532 //node + remove logical undo log of the DeleteHashIndexOperation
533 BucketList
list1((HashIndexNode
*) bucket
->bucketList_
);
534 BucketList
list2((HashIndexNode
*) bucket1
->bucketList_
);
535 list1
.insert((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
, tuple
);
536 list2
.remove((Chunk
*)iptr
->hashNodeChunk_
, tbl
->db_
, keyPtr
);
537 bucket
->bucketList_
= list1
.getBucketListHead();
538 bucket1
->bucketList_
= list2
.getBucketListHead();
539 UndoLogInfo
*logInfo
= tr
->popUndoLog();
540 Chunk
*chunk
= tbl
->sysDB_
->getSystemDatabaseChunk(UndoLogTableID
);
541 chunk
->free(tbl
->sysDB_
, logInfo
);
// Common cleanup: release both bucket locks, free both key buffers.
544 bucket1
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
545 bucket
->mutex_
.releaseLock(tbl
->db_
->procSlot
);
548 free(keyStartBuffer
);
549 free(oldKeyStartBuffer
);
553 //Following three methods are used to undo Logical Hash Indexes
554 DbRetVal
HashIndex::insertLogicalUndoLog(Database
*sysdb
, void *data
)
556 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
557 Chunk
*hChunk
= (Chunk
*) info
->hChunk_
;
558 Database
*db
= new Database();
559 db
->setMetaDataPtr((DatabaseMetaData
*) info
->metaData_
);
560 db
->setProcSlot(sysdb
->procSlot
);
561 HashIndexNode
*head
= (HashIndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
562 BucketList
list(head
);
563 list
.insert(hChunk
, db
, info
->keyPtr_
, info
->tuple_
);
564 ((Bucket
*)info
->bucket_
)->bucketList_
= list
.getBucketListHead();
568 DbRetVal
HashIndex::deleteLogicalUndoLog(Database
*sysdb
, void *data
)
570 HashUndoLogInfo
*info
= (HashUndoLogInfo
*) data
;
571 Chunk
*hChunk
= (Chunk
*) info
->hChunk_
;
572 Database
*db
= new Database();
573 db
->setMetaDataPtr((DatabaseMetaData
*)info
->metaData_
);
574 db
->setProcSlot(sysdb
->procSlot
);
575 HashIndexNode
*head
= (HashIndexNode
*)((Bucket
*)info
->bucket_
)->bucketList_
;
576 BucketList
list(head
);
577 DbRetVal rc
= list
.remove(hChunk
, db
, info
->keyPtr_
);
579 ((Bucket
*)info
->bucket_
)->bucketList_
= list
.getBucketListHead();