/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *    backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/portalcmds.h"
#include "miscadmin.h"
#include "utils/builtins.h"
#include "utils/memutils.h"

/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER    16

#define MAX_PORTALNAME_LEN  NAMEDATALEN

typedef struct portalhashent
{
    char        portalname[MAX_PORTALNAME_LEN];
    Portal      portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
do { \
    PortalHashEnt *hentry; \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_FIND, NULL); \
    if (hentry) \
        PORTAL = hentry->portal; \
    else \
        PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL, NAME) \
do { \
    PortalHashEnt *hentry; bool found; \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_ENTER, &found); \
    if (found) \
        elog(ERROR, "duplicate portal name"); \
    hentry->portal = PORTAL; \
    /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
    PORTAL->name = hentry->portalname; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
do { \
    PortalHashEnt *hentry; \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           PORTAL->name, HASH_REMOVE, NULL); \
    if (hentry == NULL) \
        elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

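/*
 * Illustrative sketch, not part of the original file: the lookup macro
 * expands in place at the call site, as in GetPortalByName() below.
 * A hypothetical caller looks like:
 *
 *      Portal      portal;
 *
 *      PortalHashTableLookup("my_cursor", portal);
 *      if (PortalIsValid(portal))
 *          ... use the portal ...
 */
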
static MemoryContext PortalMemory = NULL;

/* ----------------------------------------------------------------
 *                 public portal interface functions
 * ----------------------------------------------------------------
 */

/*
 * EnablePortalManager
 *      Enables the portal management module at backend startup.
 */
void
EnablePortalManager(void)
{
    HASHCTL     ctl;

    Assert(PortalMemory == NULL);

    PortalMemory = AllocSetContextCreate(TopMemoryContext,
                                         "PortalMemory",
                                         ALLOCSET_DEFAULT_MINSIZE,
                                         ALLOCSET_DEFAULT_INITSIZE,
                                         ALLOCSET_DEFAULT_MAXSIZE);

    ctl.keysize = MAX_PORTALNAME_LEN;
    ctl.entrysize = sizeof(PortalHashEnt);

    /*
     * use PORTALS_PER_USER as a guess of how many hash table entries to
     * create, initially
     */
    PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
                                  &ctl, HASH_ELEM);
}

/*
 * GetPortalByName
 *      Returns a portal given a portal name, or NULL if name not found.
 */
Portal
GetPortalByName(const char *name)
{
    Portal      portal;

    if (PointerIsValid(name))
        PortalHashTableLookup(name, portal);
    else
        portal = NULL;

    return portal;
}

/*
 * PortalListGetPrimaryStmt
 *      Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.  Neither of these
 * cases should occur in present usages of this function.
 *
 * Copes if given a list of Querys --- can't happen in a portal, but this
 * code also supports plancache.c, which needs both cases.
 *
 * Note: the reason this is just handed a List is so that plancache.c
 * can share the code.  For use with a portal, use PortalGetPrimaryStmt
 * rather than calling this directly.
 */
Node *
PortalListGetPrimaryStmt(List *stmts)
{
    ListCell   *lc;

    foreach(lc, stmts)
    {
        Node       *stmt = (Node *) lfirst(lc);

        if (IsA(stmt, PlannedStmt))
        {
            if (((PlannedStmt *) stmt)->canSetTag)
                return stmt;
        }
        else if (IsA(stmt, Query))
        {
            if (((Query *) stmt)->canSetTag)
                return stmt;
        }
        else
        {
            /* Utility stmts are assumed canSetTag if they're the only stmt */
            if (list_length(stmts) == 1)
                return stmt;
        }
    }
    return NULL;
}

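/*
 * Example, not part of the original file: rewrite rules can turn one
 * statement into several.  Given a hypothetical rule
 *
 *      CREATE RULE log_ins AS ON INSERT TO t
 *          DO ALSO INSERT INTO t_log VALUES (new.*);
 *
 * an "INSERT INTO t ..." portal carries two statements, but only the one
 * for the original query has canSetTag set, so PortalListGetPrimaryStmt
 * returns that one and its completion tag is what the client sees.
 */
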
/*
 * CreatePortal
 *      Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
    Portal      portal;

    AssertArg(PointerIsValid(name));

    portal = GetPortalByName(name);
    if (PortalIsValid(portal))
    {
        if (!allowDup)
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("cursor \"%s\" already exists", name)));
        if (!dupSilent)
            ereport(WARNING,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("closing existing cursor \"%s\"",
                            name)));
        PortalDrop(portal, false);
    }

    /* make new portal structure */
    portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);

    /* initialize portal heap context; typically it won't store much */
    portal->heap = AllocSetContextCreate(PortalMemory,
                                         "PortalHeapMemory",
                                         ALLOCSET_SMALL_MINSIZE,
                                         ALLOCSET_SMALL_INITSIZE,
                                         ALLOCSET_SMALL_MAXSIZE);

    /* create a resource owner for the portal */
    portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
                                           "Portal");

    /* initialize portal fields that don't start off zero */
    portal->status = PORTAL_NEW;
    portal->cleanup = PortalCleanup;
    portal->createSubid = GetCurrentSubTransactionId();
    portal->strategy = PORTAL_MULTI_QUERY;
    portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
    portal->atStart = true;
    portal->atEnd = true;       /* disallow fetches until query is set */
    portal->visible = true;
    portal->creation_time = GetCurrentStatementStartTimestamp();

    /* put portal in table (sets portal->name) */
    PortalHashTableInsert(portal, name);

    return portal;
}

/*
 * CreateNewPortal
 *      Create a new portal, assigning it an automatically generated
 *      nonconflicting name.
 */
Portal
CreateNewPortal(void)
{
    static unsigned int unnamed_portal_count = 0;

    char        portalname[MAX_PORTALNAME_LEN];

    /* Select a nonconflicting name */
    for (;;)
    {
        unnamed_portal_count++;
        sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
        if (GetPortalByName(portalname) == NULL)
            break;
    }

    return CreatePortal(portalname, false, false);
}

/*
 * PortalDefineQuery
 *      A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts,
 * and the caller must have done RevalidateCachedPlan(), causing a refcount
 * increment.  The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's heap context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
                  const char *prepStmtName,
                  const char *sourceText,
                  const char *commandTag,
                  List *stmts,
                  CachedPlan *cplan)
{
    AssertArg(PortalIsValid(portal));
    AssertState(portal->status == PORTAL_NEW);

    AssertArg(sourceText != NULL);
    AssertArg(commandTag != NULL || stmts == NIL);

    portal->prepStmtName = prepStmtName;
    portal->sourceText = sourceText;
    portal->commandTag = commandTag;
    portal->stmts = stmts;
    portal->cplan = cplan;
    portal->status = PORTAL_DEFINED;
}

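/*
 * Illustrative sketch, not part of the original file: the typical creation
 * sequence as used by simple-query execution in postgres.c:
 *
 *      portal = CreatePortal("", true, true);
 *      PortalDefineQuery(portal, NULL, query_string, commandTag,
 *                        plantree_list, NULL);
 *
 * Here NULL for prepStmtName means "not a prepared statement", and NULL for
 * cplan means the caller manages plan-tree lifetime (typically by having
 * copied the trees into the portal's heap context).  Starting and running
 * the portal are handled elsewhere (see pquery.c); PortalDrop() below
 * tears it down.
 */
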
/*
 * PortalReleaseCachedPlan
 *      Release a portal's reference to its cached plan, if any.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
    if (portal->cplan)
    {
        ReleaseCachedPlan(portal->cplan, false);
        portal->cplan = NULL;
    }
}

/*
 * PortalCreateHoldStore
 *      Create the tuplestore for a portal.
 */
void
PortalCreateHoldStore(Portal portal)
{
    MemoryContext oldcxt;

    Assert(portal->holdContext == NULL);
    Assert(portal->holdStore == NULL);

    /*
     * Create the memory context that is used for storage of the tuple set.
     * Note this is NOT a child of the portal's heap memory.
     */
    portal->holdContext =
        AllocSetContextCreate(PortalMemory,
                              "PortalHoldContext",
                              ALLOCSET_DEFAULT_MINSIZE,
                              ALLOCSET_DEFAULT_INITSIZE,
                              ALLOCSET_DEFAULT_MAXSIZE);

    /*
     * Create the tuple store, selecting cross-transaction temp files, and
     * enabling random access only if cursor requires scrolling.
     *
     * XXX: Should maintenance_work_mem be used for the portal size?
     */
    oldcxt = MemoryContextSwitchTo(portal->holdContext);

    portal->holdStore =
        tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
                              true, work_mem);

    MemoryContextSwitchTo(oldcxt);
}

/*
 * PortalDrop
 *      Destroy the portal.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
    AssertArg(PortalIsValid(portal));

    /* Not sure if this case can validly happen or not... */
    if (portal->status == PORTAL_ACTIVE)
        elog(ERROR, "cannot drop active portal");

    /*
     * Remove portal from hash table.  Because we do this first, we will not
     * come back to try to remove the portal again if there's any error in
     * the subsequent steps.  Better to leak a little memory than to get
     * into an infinite error-recovery loop.
     */
    PortalHashTableDelete(portal);

    /* let portalcmds.c clean up the state it knows about */
    if (PointerIsValid(portal->cleanup))
        (*portal->cleanup) (portal);

    /* drop cached plan reference, if any */
    if (portal->cplan)
        PortalReleaseCachedPlan(portal);

    /*
     * Release any resources still attached to the portal.  There are several
     * cases being covered here:
     *
     * Top transaction commit (indicated by isTopCommit): normally we should
     * do nothing here and let the regular end-of-transaction resource
     * releasing mechanism handle these resources too.  However, if we have a
     * FAILED portal (eg, a cursor that got an error), we'd better clean up
     * its resources to avoid resource-leakage warning messages.
     *
     * Sub transaction commit: never comes here at all, since we don't kill
     * any portals in AtSubCommit_Portals().
     *
     * Main or sub transaction abort: we will do nothing here because
     * portal->resowner was already set NULL; the resources were already
     * cleaned up in transaction abort.
     *
     * Ordinary portal drop: must release resources.  However, if the portal
     * is not FAILED then we do not release its locks.  The locks become the
     * responsibility of the transaction's ResourceOwner (since it is the
     * parent of the portal's owner) and will be released when the
     * transaction eventually ends.
     */
    if (portal->resowner &&
        (!isTopCommit || portal->status == PORTAL_FAILED))
    {
        bool        isCommit = (portal->status != PORTAL_FAILED);

        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_BEFORE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_AFTER_LOCKS,
                             isCommit, false);
        ResourceOwnerDelete(portal->resowner);
    }
    portal->resowner = NULL;

    /*
     * Delete tuplestore if present.  We should do this even under error
     * conditions; since the tuplestore would have been using cross-
     * transaction storage, its temp files need to be explicitly deleted.
     */
    if (portal->holdStore)
    {
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(portal->holdContext);
        tuplestore_end(portal->holdStore);
        MemoryContextSwitchTo(oldcontext);
        portal->holdStore = NULL;
    }

    /* delete tuplestore storage, if any */
    if (portal->holdContext)
        MemoryContextDelete(portal->holdContext);

    /* release subsidiary storage */
    MemoryContextDelete(PortalGetHeapMemory(portal));

    /* release portal struct (it's in PortalMemory) */
    pfree(portal);
}

/*
 * PortalHashTableDeleteAll
 *      Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
void
PortalHashTableDeleteAll(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    if (PortalHashTable == NULL)
        return;

    hash_seq_init(&status, PortalHashTable);
    while ((hentry = hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->status != PORTAL_ACTIVE)
            PortalDrop(portal, false);
    }
}

/*
 * Pre-commit processing for portals.
 *
 * Any holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Other portals are not touched yet.
 *
 * Returns TRUE if any holdable cursors were processed, FALSE if not.
 */
bool
CommitHoldablePortals(void)
{
    bool        result = false;
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Is it a holdable portal created in the current xact? */
        if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
            portal->createSubid != InvalidSubTransactionId &&
            portal->status == PORTAL_READY)
        {
            /*
             * We are exiting the transaction that created a holdable cursor.
             * Instead of dropping the portal, prepare it for access by later
             * transactions.
             *
             * Note that PersistHoldablePortal() must release all resources
             * used by the portal that are local to the creating transaction.
             */
            PortalCreateHoldStore(portal);
            PersistHoldablePortal(portal);

            /* drop cached plan reference, if any */
            if (portal->cplan)
                PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; the portal will no longer
             * have its own resources.
             */
            portal->resowner = NULL;

            /*
             * Having successfully exported the holdable cursor, mark it as
             * not belonging to this transaction.
             */
            portal->createSubid = InvalidSubTransactionId;

            /* Report we changed state */
            result = true;
        }
    }

    return result;
}

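/*
 * For example (illustrative, not part of the original file), the following
 * SQL session exercises the function above; at COMMIT the cursor's results
 * are materialized into its hold store so later transactions can FETCH:
 *
 *      BEGIN;
 *      DECLARE c CURSOR WITH HOLD FOR SELECT * FROM pg_class;
 *      COMMIT;             -- CommitHoldablePortals() persists c
 *      FETCH 1 FROM c;     -- reads from the tuplestore, no executor needed
 */
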
/*
 * Pre-prepare processing for portals.
 *
 * Currently we refuse PREPARE if the transaction created any holdable
 * cursors, since it's quite unclear what to do with one.  However, this
 * has the same API as CommitHoldablePortals and is invoked in the same
 * way by xact.c, so that we can easily do something reasonable if anyone
 * comes up with something reasonable to do.
 *
 * Returns TRUE if any holdable cursors were processed, FALSE if not.
 */
bool
PrepareHoldablePortals(void)
{
    bool        result = false;
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Is it a holdable portal created in the current xact? */
        if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
            portal->createSubid != InvalidSubTransactionId &&
            portal->status == PORTAL_READY)
        {
            /*
             * We are exiting the transaction that created a holdable cursor.
             * Can't do PREPARE.
             */
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
        }
    }

    return result;
}

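/*
 * For example (illustrative, not part of the original file):
 *
 *      BEGIN;
 *      DECLARE c CURSOR WITH HOLD FOR SELECT 1;
 *      PREPARE TRANSACTION 'tx1';   -- fails with the error above
 */
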
/*
 * Pre-commit processing for portals.
 *
 * Remove all non-holdable portals created in this transaction.
 * Portals remaining from prior transactions should be left untouched.
 */
void
AtCommit_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /*
         * Do not touch active portals --- this can only happen in the case
         * of a multi-transaction utility command, such as VACUUM.
         *
         * Note however that any resource owner attached to such a portal is
         * still going to go away, so don't leave a dangling pointer.
         */
        if (portal->status == PORTAL_ACTIVE)
        {
            portal->resowner = NULL;
            continue;
        }

        /*
         * Do nothing to cursors held over from a previous transaction
         * (including holdable ones just frozen by CommitHoldablePortals).
         */
        if (portal->createSubid == InvalidSubTransactionId)
            continue;

        /* Zap all non-holdable portals */
        PortalDrop(portal, true);

        /* Restart the iteration in case that led to other drops */
        /* XXX is this really necessary? */
        hash_seq_term(&status);
        hash_seq_init(&status, PortalHashTable);
    }
}

/*
 * Abort processing for portals.
 *
 * At this point we reset "active" status and run the cleanup hook if
 * present, but we can't release the portal's memory until the cleanup call.
 *
 * The reason we need to reset active is so that we can replace the unnamed
 * portal, else we'll fail to execute ROLLBACK when it arrives.
 */
void
AtAbort_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->status == PORTAL_ACTIVE)
            portal->status = PORTAL_FAILED;

        /*
         * Do nothing else to cursors held over from a previous transaction.
         */
        if (portal->createSubid == InvalidSubTransactionId)
            continue;

        /* let portalcmds.c clean up the state it knows about */
        if (PointerIsValid(portal->cleanup))
        {
            (*portal->cleanup) (portal);
            portal->cleanup = NULL;
        }

        /* drop cached plan reference, if any */
        if (portal->cplan)
            PortalReleaseCachedPlan(portal);

        /*
         * Any resources belonging to the portal will be released in the
         * upcoming transaction-wide cleanup; they will be gone before we run
         * PortalDrop.
         */
        portal->resowner = NULL;

        /*
         * Although we can't delete the portal data structure proper, we can
         * release any memory in subsidiary contexts, such as executor state.
         * The cleanup hook was the last thing that might have needed data
         * there.
         */
        MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
    }
}

/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Do nothing to cursors held over from a previous transaction */
        if (portal->createSubid == InvalidSubTransactionId)
        {
            Assert(portal->status != PORTAL_ACTIVE);
            Assert(portal->resowner == NULL);
            continue;
        }

        /* Else zap it. */
        PortalDrop(portal, false);
    }
}

/*
 * Pre-subcommit processing for portals.
 *
 * Reassign the portals created in the current subtransaction to the parent
 * subtransaction.
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
                    SubTransactionId parentSubid,
                    ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid == mySubid)
        {
            portal->createSubid = parentSubid;
            if (portal->resowner)
                ResourceOwnerNewParent(portal->resowner, parentXactOwner);
        }
    }
}

/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
                   SubTransactionId parentSubid,
                   ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid != mySubid)
            continue;

        /*
         * Force any active portals of my own transaction into FAILED state.
         * This is mostly to ensure that a portal running a FETCH will go
         * FAILED if the underlying cursor fails.  (Note we do NOT want to do
         * this to upper-level portals, since they may be able to continue.)
         *
         * This is only needed to dodge the sanity check in PortalDrop.
         */
        if (portal->status == PORTAL_ACTIVE)
            portal->status = PORTAL_FAILED;

        /*
         * If the portal is READY then allow it to survive into the parent
         * transaction; otherwise shut it down.
         *
         * Currently, we can't actually support that because the portal's
         * query might refer to objects created or changed in the failed
         * subtransaction, leading to crashes if execution is resumed.  So,
         * even READY portals are deleted.  It would be nice to detect
         * whether the query actually depends on any such object, instead.
         */
#ifdef NOT_USED
        if (portal->status == PORTAL_READY)
        {
            portal->createSubid = parentSubid;
            if (portal->resowner)
                ResourceOwnerNewParent(portal->resowner, parentXactOwner);
        }
        else
#endif
        {
            /* let portalcmds.c clean up the state it knows about */
            if (PointerIsValid(portal->cleanup))
            {
                (*portal->cleanup) (portal);
                portal->cleanup = NULL;
            }

            /* drop cached plan reference, if any */
            if (portal->cplan)
                PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; they will be gone before we
             * run PortalDrop.
             */
            portal->resowner = NULL;

            /*
             * Although we can't delete the portal data structure proper, we
             * can release any memory in subsidiary contexts, such as
             * executor state.  The cleanup hook was the last thing that
             * might have needed data there.
             */
            MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
        }
    }
}

/*
 * Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
void
AtSubCleanup_Portals(SubTransactionId mySubid)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid != mySubid)
            continue;

        /* Zap it. */
        PortalDrop(portal, false);
    }
}

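/*
 * For example (illustrative, not part of the original file):
 *
 *      BEGIN;
 *      SAVEPOINT s;
 *      DECLARE c CURSOR FOR SELECT 1;
 *      RELEASE s;          -- AtSubCommit_Portals() reassigns c to the parent
 *      FETCH c;            -- still works
 *
 * With ROLLBACK TO s instead of RELEASE s, the cursor is shut down by
 * AtSubAbort_Portals() and then dropped by AtSubCleanup_Portals().
 */
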
/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    HASH_SEQ_STATUS hash_seq;
    PortalHashEnt *hentry;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not " \
                        "allowed in this context")));

    /* need to build tuplestore in query context */
    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /*
     * build tupdesc for result tuples. This must match the definition of
     * the pg_cursors view in system_views.sql
     */
    tupdesc = CreateTemplateTupleDesc(6, false);
    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
                       TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
                       TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
                       TIMESTAMPTZOID, -1, 0);

    /*
     * We put all the tuples into a tuplestore in one scan of the hashtable.
     * This avoids any issue of the hashtable possibly changing between
     * calls.
     */
    tupstore =
        tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
                              false, work_mem);

    hash_seq_init(&hash_seq, PortalHashTable);
    while ((hentry = hash_seq_search(&hash_seq)) != NULL)
    {
        Portal      portal = hentry->portal;
        Datum       values[6];
        bool        nulls[6];

        /* report only "visible" entries */
        if (!portal->visible)
            continue;

        /* generate junk in short-term context */
        MemoryContextSwitchTo(oldcontext);

        MemSet(nulls, 0, sizeof(nulls));

        values[0] = CStringGetTextDatum(portal->name);
        values[1] = CStringGetTextDatum(portal->sourceText);
        values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
        values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
        values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
        values[5] = TimestampTzGetDatum(portal->creation_time);

        /* switch to appropriate context while storing the tuple */
        MemoryContextSwitchTo(per_query_ctx);
        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    }

    /* clean up and return the tuplestore */
    tuplestore_donestoring(tupstore);

    MemoryContextSwitchTo(oldcontext);

    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    return (Datum) 0;
}
