/*-------------------------------------------------------------------------
 *
 * ginfast.c
 *    Fast insert routines for the Postgres inverted index access method.
 *    Pending entries are stored in a linear list of pages.  Later on
 *    (typically during VACUUM), ginInsertCleanup() will be invoked to
 *    transfer pending entries into the regular index structure.  This
 *    wins because bulk insertion is much more efficient than retail.
 *
 * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    $PostgreSQL$
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/gin.h"
#include "access/tuptoaster.h"
#include "catalog/index.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "utils/memutils.h"
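
/*
 * Usable space on a pending-list page: the whole block minus the page
 * header and the GIN opaque area.
 */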
#define GIN_PAGE_FREESIZE \
    ( BLCKSZ - MAXALIGN(SizeOfPageHeaderData) - MAXALIGN(sizeof(GinPageOpaqueData)) )

typedef struct DatumArray
{
    Datum      *values;         /* expansible array */
    int32       nvalues;        /* current number of valid entries */
    int32       maxvalues;      /* allocated size of array */
} DatumArray;

/*
 * Build a pending-list page from the given array of tuples, and write it out.
 */
static int32
writeListPage(Relation index, Buffer buffer,
              IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
{
    Page        page = BufferGetPage(buffer);
    int         i,
                freesize,
                size = 0;
    OffsetNumber l,
                off;
    char       *workspace;
    char       *ptr;

    /* workspace could be a local array; we use palloc for alignment */
    workspace = palloc(BLCKSZ);
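
    /*
     * Within the critical section, any error is promoted to PANIC: the page
     * contents and the XLOG record written below must stay consistent.
     */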
    START_CRIT_SECTION();

    GinInitBuffer(buffer, GIN_LIST);

    off = FirstOffsetNumber;
    ptr = workspace;

    for (i = 0; i < ntuples; i++)
    {
        int         this_size = IndexTupleSize(tuples[i]);

        memcpy(ptr, tuples[i], this_size);
        ptr += this_size;
        size += this_size;

        l = PageAddItem(page, (Item) tuples[i], this_size, off, false, false);

        if (l == InvalidOffsetNumber)
            elog(ERROR, "failed to add item to index page in \"%s\"",
                 RelationGetRelationName(index));

        off++;
    }

    Assert(size <= BLCKSZ);     /* else we overran workspace */

    GinPageGetOpaque(page)->rightlink = rightlink;

    /*
     * The tail page may contain only whole row(s), or the final part of a
     * row placed on previous pages.
     */
    if (rightlink == InvalidBlockNumber)
    {
        GinPageSetFullRow(page);
        GinPageGetOpaque(page)->maxoff = 1;
    }
    else
    {
        GinPageGetOpaque(page)->maxoff = 0;
    }

    freesize = PageGetFreeSpace(page);

    MarkBufferDirty(buffer);

    if (!index->rd_istemp)
    {
        XLogRecData rdata[2];
        ginxlogInsertListPage data;
        XLogRecPtr  recptr;

        rdata[0].buffer = buffer;
        rdata[0].buffer_std = true;
        rdata[0].data = (char *) &data;
        rdata[0].len = sizeof(ginxlogInsertListPage);
        rdata[0].next = rdata + 1;

        rdata[1].buffer = InvalidBuffer;
        rdata[1].data = workspace;
        rdata[1].len = size;
        rdata[1].next = NULL;

        data.blkno = BufferGetBlockNumber(buffer);
        data.rightlink = rightlink;
        data.ntuples = ntuples;

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT_LISTPAGE, rdata);
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    UnlockReleaseBuffer(buffer);

    END_CRIT_SECTION();

    pfree(workspace);

    return freesize;
}
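
/*
 * Build a chain of pending-list pages holding the given tuples, filling
 * *res with the resulting metadata (head, tail, tail free space, page and
 * heap-tuple counts) needed to link the chain into the pending list.
 */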
static void
makeSublist(Relation index, IndexTuple *tuples, int32 ntuples,
            GinMetaPageData *res)
{
    Buffer      curBuffer = InvalidBuffer;
    Buffer      prevBuffer = InvalidBuffer;
    int         i,
                size = 0,
                tupsize;
    int         startTuple = 0;

    Assert(ntuples > 0);

    /*
     * Split tuples into pages
     */
    for (i = 0; i < ntuples; i++)
    {
        if (curBuffer == InvalidBuffer)
        {
            curBuffer = GinNewBuffer(index);

            if (prevBuffer != InvalidBuffer)
            {
                res->nPendingPages++;
                writeListPage(index, prevBuffer,
                              tuples + startTuple, i - startTuple,
                              BufferGetBlockNumber(curBuffer));
            }
            else
            {
                res->head = BufferGetBlockNumber(curBuffer);
            }

            prevBuffer = curBuffer;
            startTuple = i;
            size = 0;
        }

        tupsize = MAXALIGN(IndexTupleSize(tuples[i])) + sizeof(ItemIdData);

        if (size + tupsize >= GinListPageSize)
        {
            /* won't fit, force a new page and reprocess */
            i--;
            curBuffer = InvalidBuffer;
        }
        else
        {
            size += tupsize;
        }
    }

    /*
     * Write last page
     */
    res->tail = BufferGetBlockNumber(curBuffer);
    res->tailFreeSize = writeListPage(index, curBuffer,
                                      tuples + startTuple, ntuples - startTuple,
                                      InvalidBlockNumber);
    res->nPendingPages++;
    /* that was only one heap tuple */
    res->nPendingHeapTuples = 1;
}

/*
 * Insert the collected values into the pending list during normal insertion.
 * The function guarantees that all values of one heap tuple are stored
 * consecutively, preserving order.
 */
void
ginHeapTupleFastInsert(Relation index, GinState *ginstate,
                       GinTupleCollector *collector)
{
    Buffer      metabuffer;
    Page        metapage;
    GinMetaPageData *metadata = NULL;
    XLogRecData rdata[2];
    Buffer      buffer = InvalidBuffer;
    Page        page = NULL;
    ginxlogUpdateMeta data;
    bool        separateList = false;
    bool        needCleanup = false;

    if (collector->ntuples == 0)
        return;

    data.node = index->rd_node;
    data.ntuples = 0;
    data.newRightlink = data.prevTail = InvalidBlockNumber;
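
    /*
     * Set up the skeleton of the metapage-update XLOG record; data.metadata
     * is filled in below, once the final state of the metapage is known.
     */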
    rdata[0].buffer = InvalidBuffer;
    rdata[0].data = (char *) &data;
    rdata[0].len = sizeof(ginxlogUpdateMeta);
    rdata[0].next = NULL;

    metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
    metapage = BufferGetPage(metabuffer);
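
    /*
     * Decide whether to append the new tuples to the current tail page of
     * the pending list, or to build them into a separate sublist of new
     * pages that is then spliced onto the list.
     */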
    if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GIN_PAGE_FREESIZE)
    {
        /*
         * Total size is greater than one page => make sublist
         */
        separateList = true;
    }
    else
    {
        LockBuffer(metabuffer, GIN_EXCLUSIVE);
        metadata = GinPageGetMeta(metapage);

        if (metadata->head == InvalidBlockNumber ||
            collector->sumsize + collector->ntuples * sizeof(ItemIdData) > metadata->tailFreeSize)
        {
            /*
             * Pending list is empty or total size is greater than the free
             * space on the tail page => make sublist.
             *
             * We unlock the metabuffer while building the sublist, to keep
             * concurrency high.
             */
            separateList = true;
            LockBuffer(metabuffer, GIN_UNLOCK);
        }
    }

    if (separateList)
    {
        GinMetaPageData sublist;

        /*
         * Build the sublist separately, then append it to the tail of the
         * pending list.
         */
        memset(&sublist, 0, sizeof(GinMetaPageData));
        makeSublist(index, collector->tuples, collector->ntuples, &sublist);

        /*
         * metapage was unlocked, see above
         */
        LockBuffer(metabuffer, GIN_EXCLUSIVE);
        metadata = GinPageGetMeta(metapage);

        if (metadata->head == InvalidBlockNumber)
        {
            /*
             * Sublist becomes the main list
             */
            START_CRIT_SECTION();
            memcpy(metadata, &sublist, sizeof(GinMetaPageData));
            memcpy(&data.metadata, &sublist, sizeof(GinMetaPageData));
        }
        else
        {
            /*
             * Merge the sublist into the existing pending list
             */
            data.prevTail = metadata->tail;
            buffer = ReadBuffer(index, metadata->tail);
            LockBuffer(buffer, GIN_EXCLUSIVE);
            page = BufferGetPage(buffer);
            Assert(GinPageGetOpaque(page)->rightlink == InvalidBlockNumber);

            START_CRIT_SECTION();

            GinPageGetOpaque(page)->rightlink = sublist.head;
            metadata->tail = sublist.tail;
            metadata->tailFreeSize = sublist.tailFreeSize;

            metadata->nPendingPages += sublist.nPendingPages;
            metadata->nPendingHeapTuples += sublist.nPendingHeapTuples;

            memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
            data.newRightlink = sublist.head;

            MarkBufferDirty(buffer);
        }
    }
    else
    {
        /*
         * Insert into tail page, metapage is already locked
         */
        OffsetNumber l,
                    off;
        int         i,
                    tupsize;
        char       *ptr;

        buffer = ReadBuffer(index, metadata->tail);
        LockBuffer(buffer, GIN_EXCLUSIVE);
        page = BufferGetPage(buffer);
        off = (PageIsEmpty(page)) ? FirstOffsetNumber :
            OffsetNumberNext(PageGetMaxOffsetNumber(page));
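
        /*
         * Prepare the WAL data: a copy of the tuples appended to the tail
         * page is logged so the insertion can be replayed.
         */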
        rdata[0].next = rdata + 1;

        rdata[1].buffer = buffer;
        rdata[1].buffer_std = true;
        ptr = rdata[1].data = (char *) palloc(collector->sumsize);
        rdata[1].len = collector->sumsize;
        rdata[1].next = NULL;

        data.ntuples = collector->ntuples;

        START_CRIT_SECTION();

        /*
         * Increase counter of heap tuples
         */
        Assert(GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples);
        GinPageGetOpaque(page)->maxoff++;
        metadata->nPendingHeapTuples++;

        for (i = 0; i < collector->ntuples; i++)
        {
            tupsize = IndexTupleSize(collector->tuples[i]);
            l = PageAddItem(page, (Item) collector->tuples[i], tupsize, off, false, false);

            if (l == InvalidOffsetNumber)
                elog(ERROR, "failed to add item to index page in \"%s\"",
                     RelationGetRelationName(index));

            memcpy(ptr, collector->tuples[i], tupsize);
            ptr += tupsize;

            off++;
        }

        metadata->tailFreeSize -= collector->sumsize + collector->ntuples * sizeof(ItemIdData);
        memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
        MarkBufferDirty(buffer);
    }

    /*
     * Write out the metapage (and tail page, if modified), and make the
     * XLOG entry.
     */
    MarkBufferDirty(metabuffer);

    if (!index->rd_istemp)
    {
        XLogRecPtr  recptr;

        recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_UPDATE_META_PAGE, rdata);
        PageSetLSN(metapage, recptr);
        PageSetTLI(metapage, ThisTimeLineID);

        if (buffer != InvalidBuffer)
        {
            PageSetLSN(page, recptr);
            PageSetTLI(page, ThisTimeLineID);
        }
    }

    if (buffer != InvalidBuffer)
        UnlockReleaseBuffer(buffer);

    /*
     * Force pending-list cleanup when it becomes too long.  Since
     * ginInsertCleanup can take a significant amount of time, we'd rather
     * call it when it can do all the work in a single collection cycle.  In
     * non-vacuum mode it shouldn't require maintenance_work_mem, so fire it
     * while the pending list is still small enough to fit into work_mem.
     *
     * ginInsertCleanup() should not be called inside our CRIT_SECTION.
     */
    if (metadata->nPendingPages * GIN_PAGE_FREESIZE > work_mem * 1024L)
        needCleanup = true;

    UnlockReleaseBuffer(metabuffer);

    END_CRIT_SECTION();

    if (needCleanup)
        ginInsertCleanup(index, ginstate, false, NULL);
}

/*
 * Collect values from one heap tuple to be indexed.  All values for one
 * heap tuple must be written at once, to guarantee a consistent state.
 */
uint32
ginHeapTupleFastCollect(Relation index, GinState *ginstate,
                        GinTupleCollector *collector,
                        OffsetNumber attnum, Datum value, ItemPointer item)
{
    Datum      *entries;
    int32       i,
                nentries;

    entries = extractEntriesSU(ginstate, attnum, value, &nentries);

    if (nentries == 0)
        /* nothing to insert */
        return 0;

    /*
     * Allocate/reallocate memory for storing collected tuples
     */
    if (collector->tuples == NULL)
    {
        collector->lentuples = nentries * index->rd_att->natts;
        collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples);
    }

    while (collector->ntuples + nentries > collector->lentuples)
    {
        collector->lentuples *= 2;
        collector->tuples = (IndexTuple *) repalloc(collector->tuples,
                                                    sizeof(IndexTuple) * collector->lentuples);
    }

    /*
     * Build an index tuple for each key value, and add it to the collection
     */
    for (i = 0; i < nentries; i++)
    {
        int32       tupsize;

        collector->tuples[collector->ntuples + i] = GinFormTuple(ginstate, attnum, entries[i], NULL, 0);
        collector->tuples[collector->ntuples + i]->t_tid = *item;
        tupsize = IndexTupleSize(collector->tuples[collector->ntuples + i]);

        if (tupsize > TOAST_INDEX_TARGET || tupsize >= GinMaxItemSize)
            elog(ERROR, "huge tuple");

        collector->sumsize += tupsize;
    }

    collector->ntuples += nentries;

    return nentries;
}
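
/*
 * Typical usage (sketch; cf. the fast-update path in gininsert.c): the
 * caller zeroes a GinTupleCollector, calls ginHeapTupleFastCollect once
 * per indexed column of the new heap tuple, and finally hands the whole
 * collection to ginHeapTupleFastInsert in a single call.
 */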

/*
 * Deletes pending list pages up to (not including) newHead page.
 * If newHead == InvalidBlockNumber then function drops the whole list.
 *
 * metapage is pinned and exclusive-locked throughout this function.
 *
 * Returns true if another cleanup process is running concurrently
 * (if so, we can just abandon our own efforts)
 */
static bool
shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
          IndexBulkDeleteResult *stats)
{
    Page        metapage;
    GinMetaPageData *metadata;
    BlockNumber blknoToDelete;

    metapage = BufferGetPage(metabuffer);
    metadata = GinPageGetMeta(metapage);
    blknoToDelete = metadata->head;

    do
    {
        Page        page;
        int         i;
        int64       nDeletedHeapTuples = 0;
        ginxlogDeleteListPages data;
        XLogRecData rdata[1];
        Buffer      buffers[GIN_NDELETE_AT_ONCE];

        data.node = index->rd_node;

        rdata[0].buffer = InvalidBuffer;
        rdata[0].data = (char *) &data;
        rdata[0].len = sizeof(ginxlogDeleteListPages);
        rdata[0].next = NULL;
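
        /*
         * Gather and exclusive-lock up to GIN_NDELETE_AT_ONCE pages to
         * delete in this batch.
         */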
        data.ndeleted = 0;
        while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
        {
            data.toDelete[data.ndeleted] = blknoToDelete;
            buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
            LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
            page = BufferGetPage(buffers[data.ndeleted]);

            data.ndeleted++;

            if (GinPageIsDeleted(page))
            {
                /* concurrent cleanup process is detected */
                for (i = 0; i < data.ndeleted; i++)
                    UnlockReleaseBuffer(buffers[i]);

                return true;
            }

            nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
            blknoToDelete = GinPageGetOpaque(page)->rightlink;
        }

        if (stats)
            stats->pages_deleted += data.ndeleted;

        START_CRIT_SECTION();

        metadata->head = blknoToDelete;

        Assert(metadata->nPendingPages >= data.ndeleted);
        metadata->nPendingPages -= data.ndeleted;
        Assert(metadata->nPendingHeapTuples >= nDeletedHeapTuples);
        metadata->nPendingHeapTuples -= nDeletedHeapTuples;

        if (blknoToDelete == InvalidBlockNumber)
        {
            metadata->tail = InvalidBlockNumber;
            metadata->tailFreeSize = 0;
            metadata->nPendingPages = 0;
            metadata->nPendingHeapTuples = 0;
        }
        memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));

        MarkBufferDirty(metabuffer);

        for (i = 0; i < data.ndeleted; i++)
        {
            page = BufferGetPage(buffers[i]);
            GinPageGetOpaque(page)->flags = GIN_DELETED;
            MarkBufferDirty(buffers[i]);
        }

        if (!index->rd_istemp)
        {
            XLogRecPtr  recptr;

            recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_LISTPAGE, rdata);
            PageSetLSN(metapage, recptr);
            PageSetTLI(metapage, ThisTimeLineID);

            for (i = 0; i < data.ndeleted; i++)
            {
                page = BufferGetPage(buffers[i]);
                PageSetLSN(page, recptr);
                PageSetTLI(page, ThisTimeLineID);
            }
        }

        for (i = 0; i < data.ndeleted; i++)
            UnlockReleaseBuffer(buffers[i]);

        END_CRIT_SECTION();
    } while (blknoToDelete != newHead);

    return false;
}

/* Add datum to DatumArray, resizing if needed */
static void
addDatum(DatumArray *datums, Datum datum)
{
    if (datums->nvalues >= datums->maxvalues)
    {
        datums->maxvalues *= 2;
        datums->values = (Datum *) repalloc(datums->values,
                                            sizeof(Datum) * datums->maxvalues);
    }

    datums->values[datums->nvalues++] = datum;
}

/*
 * Go through all tuples >= startoff on page and collect values in memory.
 *
 * Note that da is just workspace --- it does not carry any state across
 * calls.
 */
static void
processPendingPage(BuildAccumulator *accum, DatumArray *da,
                   Page page, OffsetNumber startoff)
{
    ItemPointerData heapptr;
    OffsetNumber i,
                maxoff;
    OffsetNumber attrnum,
                curattnum;

    /* reset *da to empty */
    da->nvalues = 0;

    maxoff = PageGetMaxOffsetNumber(page);
    Assert(maxoff >= FirstOffsetNumber);
    ItemPointerSetInvalid(&heapptr);
    attrnum = 0;

    for (i = startoff; i <= maxoff; i = OffsetNumberNext(i))
    {
        IndexTuple  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));

        curattnum = gintuple_get_attrnum(accum->ginstate, itup);

        if (!ItemPointerIsValid(&heapptr))
        {
            heapptr = itup->t_tid;
            attrnum = curattnum;
        }
        else if (!(ItemPointerEquals(&heapptr, &itup->t_tid) &&
                   curattnum == attrnum))
        {
            /*
             * We can insert several datums per call, but only for one heap
             * tuple and one column.
             */
            ginInsertRecordBA(accum, &heapptr, attrnum, da->values, da->nvalues);
            da->nvalues = 0;
            heapptr = itup->t_tid;
            attrnum = curattnum;
        }

        addDatum(da, gin_index_getattr(accum->ginstate, itup));
    }

    ginInsertRecordBA(accum, &heapptr, attrnum, da->values, da->nvalues);
}

/*
 * Move tuples from pending pages into regular GIN structure.
 *
 * This can be called concurrently by multiple backends, so it must cope.
 * At first glance it looks neither concurrent-safe nor crash-safe.  The
 * reason it's okay is that multiple insertion of the same entry is
 * detected and treated as a no-op by gininsert.c.  If we crash after
 * posting entries to the main index and before removing them from the
 * pending list, it's okay because when we redo the posting later on, nothing
 * bad will happen.  Likewise, if two backends simultaneously try to post
 * a pending entry into the main index, one will succeed and one will do
 * nothing.  We try to notice when someone else is a little bit ahead of
 * us in the process, but that's just to avoid wasting cycles.  Only the
 * action of removing a page from the pending list really needs exclusive
 * lock.
 *
 * vac_delay indicates that ginInsertCleanup is called from a vacuum process,
 * so vacuum_delay_point() is called periodically.
 * If stats isn't null, we count deleted pending pages in its counts.
 */
void
ginInsertCleanup(Relation index, GinState *ginstate,
                 bool vac_delay, IndexBulkDeleteResult *stats)
{
    Buffer      metabuffer,
                buffer;
    Page        metapage,
                page;
    GinMetaPageData *metadata;
    MemoryContext opCtx,
                oldCtx;
    BuildAccumulator accum;
    DatumArray  datums;
    BlockNumber blkno;
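
    /*
     * Pin and share-lock the metapage to find the current head of the
     * pending list.
     */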
    metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
    LockBuffer(metabuffer, GIN_SHARE);
    metapage = BufferGetPage(metabuffer);
    metadata = GinPageGetMeta(metapage);

    if (metadata->head == InvalidBlockNumber)
    {
        /* Nothing to do */
        UnlockReleaseBuffer(metabuffer);
        return;
    }

    /*
     * Read and lock head of pending list
     */
    blkno = metadata->head;
    buffer = ReadBuffer(index, blkno);
    LockBuffer(buffer, GIN_SHARE);
    page = BufferGetPage(buffer);

    LockBuffer(metabuffer, GIN_UNLOCK);

    /*
     * Initialize.  All temporary space will be in opCtx.
     */
    opCtx = AllocSetContextCreate(CurrentMemoryContext,
                                  "GIN insert cleanup temporary context",
                                  ALLOCSET_DEFAULT_MINSIZE,
                                  ALLOCSET_DEFAULT_INITSIZE,
                                  ALLOCSET_DEFAULT_MAXSIZE);

    oldCtx = MemoryContextSwitchTo(opCtx);

    datums.maxvalues = 128;
    datums.nvalues = 0;
    datums.values = (Datum *) palloc(sizeof(Datum) * datums.maxvalues);

    ginInitBA(&accum);
    accum.ginstate = ginstate;

    /*
     * At the top of this loop, we have pin and lock on the current page of
     * the pending list.  However, we'll release that before exiting the
     * loop.  Note we also have pin but not lock on the metapage.
     */
    for (;;)
    {
        if (GinPageIsDeleted(page))
        {
            /* another cleanup process is running concurrently */
            UnlockReleaseBuffer(buffer);
            break;
        }

        /*
         * read page's datums into memory
         */
        processPendingPage(&accum, &datums, page, FirstOffsetNumber);

        if (vac_delay)
            vacuum_delay_point();

        /*
         * Is it time to flush memory to disk?  Flush if we are at the end
         * of the pending list, or if we have a full row and memory is
         * getting full.
         *
         * XXX using up maintenance_work_mem here is probably unreasonably
         * much, since vacuum might already be using that much.
         */
        if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||
            (GinPageHasFullRow(page) &&
             (accum.allocatedMemory >= maintenance_work_mem * 1024L ||
              accum.maxdepth > GIN_MAX_TREE_DEPTH)))
        {
            ItemPointerData *list;
            uint32      nlist;
            Datum       entry;
            OffsetNumber maxoff,
                        attnum;

            /*
             * Unlock the current page to increase concurrency.  Changes to
             * the page will be detected later by comparing maxoff after the
             * memory flush is complete.
             */
            maxoff = PageGetMaxOffsetNumber(page);
            LockBuffer(buffer, GIN_UNLOCK);

            /*
             * Moving the collected data into the regular structure can take
             * a significant amount of time - so, run it without locking the
             * pending list.
             */
            while ((list = ginGetEntry(&accum, &attnum, &entry, &nlist)) != NULL)
            {
                ginEntryInsert(index, ginstate, attnum, entry, list, nlist, FALSE);
                if (vac_delay)
                    vacuum_delay_point();
            }

            /*
             * Lock the whole list to remove pages
             */
            LockBuffer(metabuffer, GIN_EXCLUSIVE);
            LockBuffer(buffer, GIN_SHARE);

            if (GinPageIsDeleted(page))
            {
                /* another cleanup process is running concurrently */
                UnlockReleaseBuffer(buffer);
                LockBuffer(metabuffer, GIN_UNLOCK);
                break;
            }

            /*
             * While we left the page unlocked, more stuff might have gotten
             * added to it.  If so, process those entries immediately.  There
             * shouldn't be very many, so we don't worry about the fact that
             * we're doing this with exclusive lock.  The insertion algorithm
             * guarantees that inserted row(s) will not continue on the next
             * page.  NOTE: intentionally no vacuum_delay_point in this loop.
             */
            if (PageGetMaxOffsetNumber(page) != maxoff)
            {
                ginInitBA(&accum);
                processPendingPage(&accum, &datums, page, maxoff + 1);

                while ((list = ginGetEntry(&accum, &attnum, &entry, &nlist)) != NULL)
                    ginEntryInsert(index, ginstate, attnum, entry, list, nlist, FALSE);
            }

            /*
             * Remember next page - it will become the new list head
             */
            blkno = GinPageGetOpaque(page)->rightlink;
            UnlockReleaseBuffer(buffer);        /* shiftList will do exclusive
                                                 * locking */

            /*
             * Remove the pages we have read from the pending list; at this
             * point all their contents have been transferred into the
             * regular structure.
             */
            if (shiftList(index, metabuffer, blkno, stats))
            {
                /* another cleanup process is running concurrently */
                LockBuffer(metabuffer, GIN_UNLOCK);
                break;
            }

            Assert(blkno == metadata->head);
            LockBuffer(metabuffer, GIN_UNLOCK);

            /*
             * if we removed the whole pending list, just exit
             */
            if (blkno == InvalidBlockNumber)
                break;

            /*
             * release memory used so far and reinit state
             */
            MemoryContextReset(opCtx);
            ginInitBA(&accum);
            datums.nvalues = 0;
            datums.values = (Datum *) palloc(sizeof(Datum) * datums.maxvalues);
        }
        else
        {
            blkno = GinPageGetOpaque(page)->rightlink;
            UnlockReleaseBuffer(buffer);
        }

        /*
         * Read next page in pending list
         */
        CHECK_FOR_INTERRUPTS();
        buffer = ReadBuffer(index, blkno);
        LockBuffer(buffer, GIN_SHARE);
        page = BufferGetPage(buffer);
    }

    ReleaseBuffer(metabuffer);

    /* Clean up temporary space */
    MemoryContextSwitchTo(oldCtx);
    MemoryContextDelete(opCtx);
}