/*-------------------------------------------------------------------------
 *
 * indextuple.c
 *	   This file contains index tuple accessor and mutator routines,
 *	   as well as various tuple utilities.
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/itup.h"
#include "access/tuptoaster.h"
/* ----------------------------------------------------------------
 *				  index_ tuple interface routines
 * ----------------------------------------------------------------
 */

/* ----------------
 *		index_form_tuple
 * ----------------
 */
IndexTuple
index_form_tuple(TupleDesc tupleDescriptor,
				 Datum *values,
				 bool *isnull)
{
	char	   *tp;				/* tuple pointer */
	IndexTuple	tuple;			/* return tuple */
	Size		size,
				data_size,
				hoff;
	int			i;
	unsigned short infomask = 0;
	bool		hasnull = false;
	uint16		tupmask = 0;
	int			numberOfAttributes = tupleDescriptor->natts;
#ifdef TOAST_INDEX_HACK
	Datum		untoasted_values[INDEX_MAX_KEYS];
	bool		untoasted_free[INDEX_MAX_KEYS];
#endif

	if (numberOfAttributes > INDEX_MAX_KEYS)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_COLUMNS),
				 errmsg("number of index columns (%d) exceeds limit (%d)",
						numberOfAttributes, INDEX_MAX_KEYS)));
#ifdef TOAST_INDEX_HACK
	for (i = 0; i < numberOfAttributes; i++)
	{
		Form_pg_attribute att = tupleDescriptor->attrs[i];

		untoasted_values[i] = values[i];
		untoasted_free[i] = false;

		/* Do nothing if value is NULL or not of varlena type */
		if (isnull[i] || att->attlen != -1)
			continue;

		/*
		 * If value is stored EXTERNAL, must fetch it so we are not depending
		 * on outside storage.  This should be improved someday.
		 */
		if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
		{
			untoasted_values[i] =
				PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
												DatumGetPointer(values[i])));
			untoasted_free[i] = true;
		}

		/*
		 * If value is above size target, and is of a compressible datatype,
		 * try to compress it in-line.
		 */
		if (!VARATT_IS_EXTENDED(DatumGetPointer(untoasted_values[i])) &&
			VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET &&
			(att->attstorage == 'x' || att->attstorage == 'm'))
		{
			Datum		cvalue = toast_compress_datum(untoasted_values[i]);

			if (DatumGetPointer(cvalue) != NULL)
			{
				/* successful compression */
				if (untoasted_free[i])
					pfree(DatumGetPointer(untoasted_values[i]));
				untoasted_values[i] = cvalue;
				untoasted_free[i] = true;
			}
		}
	}
#endif
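
	/*
	 * Design note: the TOAST_INDEX_HACK block above leaves no external TOAST
	 * pointers in the datums we are about to measure and fill, so an index
	 * tuple never depends on out-of-line heap storage that could go away
	 * independently of the index entry.
	 */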
	for (i = 0; i < numberOfAttributes; i++)
	{
		if (isnull[i])
		{
			hasnull = true;
			break;
		}
	}

	if (hasnull)
		infomask |= INDEX_NULL_MASK;

	hoff = IndexInfoFindDataOffset(infomask);
#ifdef TOAST_INDEX_HACK
	data_size = heap_compute_data_size(tupleDescriptor,
									   untoasted_values, isnull);
#else
	data_size = heap_compute_data_size(tupleDescriptor,
									   values, isnull);
#endif
	size = hoff + data_size;
	size = MAXALIGN(size);		/* be conservative */

	tp = (char *) palloc0(size);
	tuple = (IndexTuple) tp;
	heap_fill_tuple(tupleDescriptor,
#ifdef TOAST_INDEX_HACK
					untoasted_values,
#else
					values,
#endif
					isnull,
					(char *) tp + hoff,
					data_size,
					&tupmask,
					(hasnull ? (bits8 *) tp + sizeof(IndexTupleData) : NULL));

#ifdef TOAST_INDEX_HACK
	for (i = 0; i < numberOfAttributes; i++)
	{
		if (untoasted_free[i])
			pfree(DatumGetPointer(untoasted_values[i]));
	}
#endif
	/*
	 * We do this because heap_fill_tuple wants to initialize a "tupmask"
	 * which is used for HeapTuples, but we want an indextuple infomask. The
	 * only relevant info is the "has variable attributes" field. We have
	 * already set the hasnull bit above.
	 */
	if (tupmask & HEAP_HASVARWIDTH)
		infomask |= INDEX_VAR_MASK;

	/*
	 * Here we make sure that the size will fit in the field reserved for it
	 * in t_info.
	 */
	if ((size & INDEX_SIZE_MASK) != size)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("index row requires %lu bytes, maximum size is %lu",
						(unsigned long) size,
						(unsigned long) INDEX_SIZE_MASK)));
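
	/*
	 * t_info packs everything into 16 bits: the low bits covered by
	 * INDEX_SIZE_MASK hold the tuple size, while high bits flag
	 * variable-width attributes (INDEX_VAR_MASK) and the presence of nulls
	 * (INDEX_NULL_MASK), which is why the size must pass the check above
	 * before being OR'd in.
	 */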
	infomask |= size;

	/*
	 * initialize metadata
	 */
	tuple->t_info = infomask;

	return tuple;
}
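
/*
 * Usage sketch (illustrative only; "itupdesc" is a hypothetical tuple
 * descriptor for the index's columns):
 *
 *		Datum		values[INDEX_MAX_KEYS];
 *		bool		isnull[INDEX_MAX_KEYS];
 *		IndexTuple	itup;
 *
 *		values[0] = Int32GetDatum(42);
 *		isnull[0] = false;
 *		itup = index_form_tuple(itupdesc, values, isnull);
 *		...
 *		pfree(itup);
 */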
/* ----------------
 *		nocache_index_getattr
 *
 *		This gets called from index_getattr() macro, and only in cases
 *		where we can't use cacheoffset and the value is not null.
 *
 *		This caches attribute offsets in the attribute descriptor.
 *
 *		An alternative way to speed things up would be to cache offsets
 *		with the tuple, but that seems more difficult unless you take
 *		the storage hit of actually putting those offsets into the
 *		tuple you send to disk.  Yuck.
 *
 *		This scheme will be slightly slower than that, but should
 *		perform well for queries which hit large #'s of tuples.  After
 *		you cache the offsets once, examining all the other tuples using
 *		the same attribute descriptor will go much quicker. -cim 5/4/91
 * ----------------
 */
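
/*
 * A minimal sketch of the usual call path (variable names hypothetical):
 * callers use the index_getattr() macro, which falls through to this
 * routine only when the cached-offset fast path does not apply.
 *
 *		bool	isnull;
 *		Datum	d = index_getattr(itup, 1, itupdesc, &isnull);
 *
 * Note that attnum is 1-based at the macro level; it is decremented to
 * 0-based form below.
 */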
Datum
nocache_index_getattr(IndexTuple tup,
					  int attnum,
					  TupleDesc tupleDesc,
					  bool *isnull)
{
	Form_pg_attribute *att = tupleDesc->attrs;
	char	   *tp;				/* ptr to data part of tuple */
	bits8	   *bp = NULL;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* do we have to walk attrs? */
	int			data_off;		/* tuple data offset */
	int			off;			/* current offset within data */

	(void) isnull;				/* not used */
	/* ----------------
	 *	 Three cases:
	 *
	 *	 1: No nulls and no variable-width attributes.
	 *	 2: Has a null or a var-width AFTER att.
	 *	 3: Has nulls or var-widths BEFORE att.
	 * ----------------
	 */

#ifdef IN_MACRO
	/* This is handled in the macro */
	Assert(PointerIsValid(isnull));
	Assert(attnum > 0);

	*isnull = false;
#endif

	data_off = IndexInfoFindDataOffset(tup->t_info);

	attnum--;
	if (!IndexTupleHasNulls(tup))
	{
#ifdef IN_MACRO
		/* This is handled in the macro */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							(char *) tup + data_off +
							att[attnum]->attcacheoff);
		}
#endif
	}
	else
	{
		/*
		 * there's a null somewhere in the tuple
		 *
		 * check to see if desired att is null
		 */

		/* XXX "knows" t_bits are just after fixed tuple header! */
		bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData));

#ifdef IN_MACRO
		/* This is handled in the macro */
		if (att_isnull(attnum, bp))
		{
			*isnull = true;
			return (Datum) NULL;
		}
#endif
		/*
		 * Now check to see if any preceding bits are null...
		 */
		{
			int			byte = attnum >> 3;
			int			finalbit = attnum & 0x07;

			/* check for nulls "before" final bit of last byte */
			if ((~bp[byte]) & ((1 << finalbit) - 1))
				slow = true;
			else
			{
				/* check for nulls in any "earlier" bytes */
				int			i;

				for (i = 0; i < byte; i++)
				{
					if (bp[i] != 0xFF)
					{
						slow = true;
						break;
					}
				}
			}
		}
	}
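
	/*
	 * Worked example of the bitmap arithmetic above (illustrative): for
	 * zero-based attnum = 10, byte = 10 >> 3 = 1 and finalbit = 10 & 0x07 =
	 * 2, so the mask (1 << 2) - 1 = 0x03 tests the null bits of attributes 8
	 * and 9 in bp[1], and the loop then scans bp[0] for any earlier nulls.
	 */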
	tp = (char *) tup + data_off;
	if (!slow)
	{
		/*
		 * If we get here, there are no nulls up to and including the target
		 * attribute.  If we have a cached offset, we can use it.
		 */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							tp + att[attnum]->attcacheoff);
		}

		/*
		 * Otherwise, check for non-fixed-length attrs up to and including
		 * target.  If there aren't any, it's safe to cheaply initialize the
		 * cached offsets for these attrs.
		 */
		if (IndexTupleHasVarwidths(tup))
		{
			int			j;

			for (j = 0; j <= attnum; j++)
			{
				if (att[j]->attlen <= 0)
				{
					slow = true;
					break;
				}
			}
		}
	}
	if (!slow)
	{
		int			natts = tupleDesc->natts;
		int			j = 1;

		/*
		 * If we get here, we have a tuple with no nulls or var-widths up to
		 * and including the target attribute, so we can use the cached
		 * offset ... only we don't have it yet, or we'd not have got here.
		 * Since it's cheap to compute offsets for fixed-width columns, we
		 * take the opportunity to initialize the cached offsets for *all*
		 * the leading fixed-width columns, in hope of avoiding future visits
		 * to this routine.
		 */
		att[0]->attcacheoff = 0;

		/* we might have set some offsets in the slow path previously */
		while (j < natts && att[j]->attcacheoff > 0)
			j++;

		off = att[j - 1]->attcacheoff + att[j - 1]->attlen;

		for (; j < natts; j++)
		{
			if (att[j]->attlen <= 0)
				break;

			off = att_align_nominal(off, att[j]->attalign);

			att[j]->attcacheoff = off;

			off += att[j]->attlen;
		}

		Assert(j > attnum);

		off = att[attnum]->attcacheoff;
	}
	else
	{
		bool		usecache = true;
		int			i;

		/*
		 * Now we know that we have to walk the tuple CAREFULLY.  But we
		 * still might be able to cache some offsets for next time.
		 *
		 * Note - This loop is a little tricky.  For each non-null attribute,
		 * we have to first account for alignment padding before the attr,
		 * then advance over the attr based on its length.  Nulls have no
		 * storage and no alignment padding either.  We can use/set
		 * attcacheoff until we reach either a null or a var-width attribute.
		 */

		off = 0;
		for (i = 0;; i++)		/* loop exit is at "break" */
		{
			if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
			{
				usecache = false;
				continue;		/* this cannot be the target att */
			}

			/* If we know the next offset, we can skip the rest */
			if (usecache && att[i]->attcacheoff >= 0)
				off = att[i]->attcacheoff;
			else if (att[i]->attlen == -1)
			{
				/*
				 * We can only cache the offset for a varlena attribute if the
				 * offset is already suitably aligned, so that there would be
				 * no pad bytes in any case: then the offset will be valid for
				 * either an aligned or unaligned value.
				 */
				if (usecache &&
					off == att_align_nominal(off, att[i]->attalign))
					att[i]->attcacheoff = off;
				else
				{
					off = att_align_pointer(off, att[i]->attalign, -1,
											tp + off);
					usecache = false;
				}
			}
			else
			{
				/* not varlena, so safe to use att_align_nominal */
				off = att_align_nominal(off, att[i]->attalign);

				if (usecache)
					att[i]->attcacheoff = off;
			}

			if (i == attnum)
				break;

			off = att_addlength_pointer(off, att[i]->attlen, tp + off);

			if (usecache && att[i]->attlen <= 0)
				usecache = false;
		}
	}

	return fetchatt(att[attnum], tp + off);
}
/*
 * Create a palloc'd copy of an index tuple.
 */
IndexTuple
CopyIndexTuple(IndexTuple source)
{
	IndexTuple	result;
	Size		size;

	size = IndexTupleSize(source);
	result = (IndexTuple) palloc(size);
	memcpy(result, source, size);
	return result;
}
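
/*
 * Usage sketch (illustrative): a copy is useful when the original tuple
 * lives in a shared buffer or a short-lived memory context that may go away
 * before the caller is done with it.
 *
 *		IndexTuple	copy = CopyIndexTuple(itup);
 *		...
 *		pfree(copy);
 */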