src/backend/access/common/indextuple.c
/*-------------------------------------------------------------------------
 *
 * indextuple.c
 *     This file contains index tuple accessor and mutator routines,
 *     as well as various tuple utilities.
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *     $PostgreSQL$
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/itup.h"
#include "access/tuptoaster.h"

/* ----------------------------------------------------------------
 *              index tuple interface routines
 * ----------------------------------------------------------------
 */

/* ----------------
 *      index_form_tuple
 * ----------------
 */
IndexTuple
index_form_tuple(TupleDesc tupleDescriptor,
                 Datum *values,
                 bool *isnull)
{
    char       *tp;             /* tuple pointer */
    IndexTuple  tuple;          /* return tuple */
    Size        size,
                data_size,
                hoff;
    int         i;
    unsigned short infomask = 0;
    bool        hasnull = false;
    uint16      tupmask = 0;
    int         numberOfAttributes = tupleDescriptor->natts;

#ifdef TOAST_INDEX_HACK
    Datum       untoasted_values[INDEX_MAX_KEYS];
    bool        untoasted_free[INDEX_MAX_KEYS];
#endif

    if (numberOfAttributes > INDEX_MAX_KEYS)
        ereport(ERROR,
                (errcode(ERRCODE_TOO_MANY_COLUMNS),
                 errmsg("number of index columns (%d) exceeds limit (%d)",
                        numberOfAttributes, INDEX_MAX_KEYS)));

#ifdef TOAST_INDEX_HACK
    for (i = 0; i < numberOfAttributes; i++)
    {
        Form_pg_attribute att = tupleDescriptor->attrs[i];

        untoasted_values[i] = values[i];
        untoasted_free[i] = false;

        /* Do nothing if value is NULL or not of varlena type */
        if (isnull[i] || att->attlen != -1)
            continue;

        /*
         * If value is stored EXTERNAL, must fetch it so we are not depending
         * on outside storage.  This should be improved someday.
         */
        if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
        {
            untoasted_values[i] =
                PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
                                                DatumGetPointer(values[i])));
            untoasted_free[i] = true;
        }

        /*
         * If value is above size target, and is of a compressible datatype,
         * try to compress it in-line.
         */
        if (!VARATT_IS_EXTENDED(DatumGetPointer(untoasted_values[i])) &&
            VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET &&
            (att->attstorage == 'x' || att->attstorage == 'm'))
        {
            Datum       cvalue = toast_compress_datum(untoasted_values[i]);

            if (DatumGetPointer(cvalue) != NULL)
            {
                /* successful compression */
                if (untoasted_free[i])
                    pfree(DatumGetPointer(untoasted_values[i]));
                untoasted_values[i] = cvalue;
                untoasted_free[i] = true;
            }
        }
    }
#endif

    for (i = 0; i < numberOfAttributes; i++)
    {
        if (isnull[i])
        {
            hasnull = true;
            break;
        }
    }

    if (hasnull)
        infomask |= INDEX_NULL_MASK;

    hoff = IndexInfoFindDataOffset(infomask);
#ifdef TOAST_INDEX_HACK
    data_size = heap_compute_data_size(tupleDescriptor,
                                       untoasted_values, isnull);
#else
    data_size = heap_compute_data_size(tupleDescriptor,
                                       values, isnull);
#endif
    size = hoff + data_size;
    size = MAXALIGN(size);      /* be conservative */

    tp = (char *) palloc0(size);
    tuple = (IndexTuple) tp;

    heap_fill_tuple(tupleDescriptor,
#ifdef TOAST_INDEX_HACK
                    untoasted_values,
#else
                    values,
#endif
                    isnull,
                    (char *) tp + hoff,
                    data_size,
                    &tupmask,
                    (hasnull ? (bits8 *) tp + sizeof(IndexTupleData) : NULL));

#ifdef TOAST_INDEX_HACK
    for (i = 0; i < numberOfAttributes; i++)
    {
        if (untoasted_free[i])
            pfree(DatumGetPointer(untoasted_values[i]));
    }
#endif

    /*
     * We do this because heap_fill_tuple wants to initialize a "tupmask"
     * which is used for HeapTuples, but we want an indextuple infomask. The
     * only relevant info is the "has variable attributes" field. We have
     * already set the hasnull bit above.
     */
    if (tupmask & HEAP_HASVARWIDTH)
        infomask |= INDEX_VAR_MASK;

    /*
     * Here we make sure that the size will fit in the field reserved for it
     * in t_info.
     */
    if ((size & INDEX_SIZE_MASK) != size)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("index row requires %lu bytes, maximum size is %lu",
                        (unsigned long) size,
                        (unsigned long) INDEX_SIZE_MASK)));

    infomask |= size;

    /*
     * initialize metadata
     */
    tuple->t_info = infomask;
    return tuple;
}
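
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller might use index_form_tuple() to build one index entry for a heap
 * row.  "indexRelation", "values[]", "isnull[]" and "heapTid" are assumed
 * to have been prepared by the caller (e.g. via FormIndexDatum); the
 * function name and parameters are placeholders, not APIs defined here.
 */
#ifdef NOT_USED
static IndexTuple
example_build_index_entry(Relation indexRelation,
                          Datum *values, bool *isnull,
                          ItemPointer heapTid)
{
    IndexTuple  itup;

    /* Pack the key columns into a palloc'd IndexTuple. */
    itup = index_form_tuple(RelationGetDescr(indexRelation), values, isnull);

    /* Point the new entry at the heap row it describes. */
    itup->t_tid = *heapTid;

    return itup;
}
#endif   /* NOT_USED */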

/* ----------------
 *      nocache_index_getattr
 *
 *      This gets called from the index_getattr() macro, in cases where
 *      we can't use a cached offset and the value is not null.
 *
 *      This caches attribute offsets in the attribute descriptor.
 *
 *      An alternative way to speed things up would be to cache offsets
 *      with the tuple, but that seems more difficult unless you take
 *      the storage hit of actually putting those offsets into the
 *      tuple you send to disk.  Yuck.
 *
 *      This scheme will be slightly slower than that, but should
 *      perform well for queries which hit large #'s of tuples.  After
 *      you cache the offsets once, examining all the other tuples using
 *      the same attribute descriptor will go much quicker. -cim 5/4/91
 * ----------------
 */
Datum
nocache_index_getattr(IndexTuple tup,
                      int attnum,
                      TupleDesc tupleDesc,
                      bool *isnull)
{
    Form_pg_attribute *att = tupleDesc->attrs;
    char       *tp;             /* ptr to data part of tuple */
    bits8      *bp = NULL;      /* ptr to null bitmap in tuple */
    bool        slow = false;   /* do we have to walk attrs? */
    int         data_off;       /* tuple data offset */
    int         off;            /* current offset within data */

    (void) isnull;              /* not used */

    /* ----------------
     *   Three cases:
     *
     *   1: No nulls and no variable-width attributes.
     *   2: Has a null or a var-width AFTER att.
     *   3: Has nulls or var-widths BEFORE att.
     * ----------------
     */

#ifdef IN_MACRO
/* This is handled in the macro */
    Assert(PointerIsValid(isnull));
    Assert(attnum > 0);

    *isnull = false;
#endif

    data_off = IndexInfoFindDataOffset(tup->t_info);

    attnum--;

    if (!IndexTupleHasNulls(tup))
    {
#ifdef IN_MACRO
/* This is handled in the macro */
        if (att[attnum]->attcacheoff >= 0)
        {
            return fetchatt(att[attnum],
                            (char *) tup + data_off +
                            att[attnum]->attcacheoff);
        }
#endif
    }
    else
    {
        /*
         * there's a null somewhere in the tuple
         *
         * check to see if desired att is null
         */

        /* XXX "knows" t_bits are just after fixed tuple header! */
        bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData));

#ifdef IN_MACRO
/* This is handled in the macro */

        if (att_isnull(attnum, bp))
        {
            *isnull = true;
            return (Datum) NULL;
        }
#endif

        /*
         * Now check to see if any preceding bits are null...
         */
        {
            int         byte = attnum >> 3;
            int         finalbit = attnum & 0x07;

            /* check for nulls "before" final bit of last byte */
            if ((~bp[byte]) & ((1 << finalbit) - 1))
                slow = true;
            else
            {
                /* check for nulls in any "earlier" bytes */
                int         i;

                for (i = 0; i < byte; i++)
                {
                    if (bp[i] != 0xFF)
                    {
                        slow = true;
                        break;
                    }
                }
            }
        }
    }

    tp = (char *) tup + data_off;

    if (!slow)
    {
        /*
         * If we get here, there are no nulls up to and including the target
         * attribute.  If we have a cached offset, we can use it.
         */
        if (att[attnum]->attcacheoff >= 0)
        {
            return fetchatt(att[attnum],
                            tp + att[attnum]->attcacheoff);
        }

        /*
         * Otherwise, check for non-fixed-length attrs up to and including
         * target.  If there aren't any, it's safe to cheaply initialize the
         * cached offsets for these attrs.
         */
        if (IndexTupleHasVarwidths(tup))
        {
            int         j;

            for (j = 0; j <= attnum; j++)
            {
                if (att[j]->attlen <= 0)
                {
                    slow = true;
                    break;
                }
            }
        }
    }

    if (!slow)
    {
        int         natts = tupleDesc->natts;
        int         j = 1;

        /*
         * If we get here, we have a tuple with no nulls or var-widths up to
         * and including the target attribute, so we can use the cached
         * offset ... only we don't have it yet, or we'd not have got here.
         * Since it's cheap to compute offsets for fixed-width columns, we
         * take the opportunity to initialize the cached offsets for *all*
         * the leading fixed-width columns, in hope of avoiding future visits
         * to this routine.
         */
        att[0]->attcacheoff = 0;

        /* we might have set some offsets in the slow path previously */
        while (j < natts && att[j]->attcacheoff > 0)
            j++;

        off = att[j - 1]->attcacheoff + att[j - 1]->attlen;

        for (; j < natts; j++)
        {
            if (att[j]->attlen <= 0)
                break;

            off = att_align_nominal(off, att[j]->attalign);

            att[j]->attcacheoff = off;

            off += att[j]->attlen;
        }

        Assert(j > attnum);

        off = att[attnum]->attcacheoff;
    }
    else
    {
        bool        usecache = true;
        int         i;

        /*
         * Now we know that we have to walk the tuple CAREFULLY.  But we
         * still might be able to cache some offsets for next time.
         *
         * Note - This loop is a little tricky.  For each non-null attribute,
         * we have to first account for alignment padding before the attr,
         * then advance over the attr based on its length.  Nulls have no
         * storage and no alignment padding either.  We can use/set
         * attcacheoff until we reach either a null or a var-width attribute.
         */
        off = 0;
        for (i = 0;; i++)       /* loop exit is at "break" */
        {
            if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
            {
                usecache = false;
                continue;       /* this cannot be the target att */
            }

            /* If we know the next offset, we can skip the rest */
            if (usecache && att[i]->attcacheoff >= 0)
                off = att[i]->attcacheoff;
            else if (att[i]->attlen == -1)
            {
                /*
                 * We can only cache the offset for a varlena attribute if
                 * the offset is already suitably aligned, so that there
                 * would be no pad bytes in any case: then the offset will be
                 * valid for either an aligned or unaligned value.
                 */
                if (usecache &&
                    off == att_align_nominal(off, att[i]->attalign))
                    att[i]->attcacheoff = off;
                else
                {
                    off = att_align_pointer(off, att[i]->attalign, -1,
                                            tp + off);
                    usecache = false;
                }
            }
            else
            {
                /* not varlena, so safe to use att_align_nominal */
                off = att_align_nominal(off, att[i]->attalign);

                if (usecache)
                    att[i]->attcacheoff = off;
            }

            if (i == attnum)
                break;

            off = att_addlength_pointer(off, att[i]->attlen, tp + off);

            if (usecache && att[i]->attlen <= 0)
                usecache = false;
        }
    }

    return fetchatt(att[attnum], tp + off);
}
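
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * reader might pull the first key column out of an index tuple.  Callers
 * normally go through the index_getattr() macro in access/itup.h, which
 * handles the cached-offset and NULL cases itself and falls back to
 * nocache_index_getattr() above when it must walk the tuple.  "itupdesc"
 * is assumed to be the index's tuple descriptor.
 */
#ifdef NOT_USED
static Datum
example_fetch_first_key(IndexTuple itup, TupleDesc itupdesc, bool *isnull)
{
    /* Attribute numbers passed to index_getattr are 1-based. */
    return index_getattr(itup, 1, itupdesc, isnull);
}
#endif   /* NOT_USED */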

/*
 * Create a palloc'd copy of an index tuple.
 */
IndexTuple
CopyIndexTuple(IndexTuple source)
{
    IndexTuple  result;
    Size        size;

    size = IndexTupleSize(source);
    result = (IndexTuple) palloc(size);
    memcpy(result, source, size);
    return result;
}
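
/*
 * Illustrative sketch (not part of the original file): CopyIndexTuple() is
 * typically used to detach an index tuple from the shared buffer page it
 * lives on, so the copy remains usable after the page is unlocked.  "page"
 * and "offnum" are placeholders for state a real caller would already have.
 */
#ifdef NOT_USED
static IndexTuple
example_copy_tuple_from_page(Page page, OffsetNumber offnum)
{
    ItemId      itemid = PageGetItemId(page, offnum);
    IndexTuple  onpage = (IndexTuple) PageGetItem(page, itemid);

    /* palloc'd copy survives after the buffer lock/pin is released */
    return CopyIndexTuple(onpage);
}
#endif   /* NOT_USED */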