/*
 * index.c - NTFS kernel index handling. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004-2005 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>

#include "aops.h"
#include "collate.h"
#include "debug.h"
#include "index.h"
#include "ntfs.h"

/**
 * ntfs_index_ctx_get - allocate and initialize a new index context
 * @idx_ni:	ntfs index inode with which to initialize the context
 *
 * Allocate a new index context, initialize it with @idx_ni and return it.
 * Return NULL if allocation failed.
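 *
 * The context is allocated from the ntfs_index_ctx_cache slab cache and, on
 * success, is zeroed apart from its @idx_ni field.  Pair every successful
 * call with a matching ntfs_index_ctx_put() (see the illustrative example in
 * the ntfs_index_lookup() description below).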
 *
 * Locking:  Caller must hold i_mutex on the index inode.
 */
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
{
	ntfs_index_context *ictx;

	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
	if (ictx)
		*ictx = (ntfs_index_context){ .idx_ni = idx_ni };
	return ictx;
}

/**
 * ntfs_index_ctx_put - release an index context
 * @ictx:	index context to free
 *
 * Release the index context @ictx, releasing all associated resources.
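 *
 * If no index entry has been looked up through @ictx (i.e. @ictx->entry is
 * NULL), only the context itself is freed.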
 *
 * Locking:  Caller must hold i_mutex on the index inode.
 */
void ntfs_index_ctx_put(ntfs_index_context *ictx)
{
	if (ictx->entry) {
		if (ictx->is_in_root) {
			if (ictx->actx)
				ntfs_attr_put_search_ctx(ictx->actx);
			if (ictx->base_ni)
				unmap_mft_record(ictx->base_ni);
		} else {
			struct page *page = ictx->page;
			if (page) {
				BUG_ON(!PageLocked(page));
				unlock_page(page);
				ntfs_unmap_page(page);
			}
		}
	}
	kmem_cache_free(ntfs_index_ctx_cache, ictx);
	return;
}

/**
 * ntfs_index_lookup - find a key in an index and return its index entry
 * @key:	[IN] key for which to search in the index
 * @key_len:	[IN] length of @key in bytes
 * @ictx:	[IN/OUT] context describing the index and the returned entry
 *
 * Before calling ntfs_index_lookup(), @ictx must have been obtained from a
 * call to ntfs_index_ctx_get().
 *
 * Look for the @key in the index specified by the index lookup context @ictx.
 * ntfs_index_lookup() walks the contents of the index looking for the @key.
 *
 * If the @key is found in the index, 0 is returned and @ictx is setup to
 * describe the index entry containing the matching @key.  @ictx->entry is the
 * index entry and @ictx->data and @ictx->data_len are the index entry data
 * and its length in bytes, respectively.
 *
 * If the @key is not found in the index, -ENOENT is returned and @ictx is
 * setup to describe the index entry whose key collates immediately after the
 * search @key, i.e. this is the position in the index at which an index entry
 * with a key of @key would need to be inserted.
 *
 * If an error occurs, the negative error code is returned and @ictx is left
 * untouched.
 *
 * When finished with the entry and its data, call ntfs_index_ctx_put() to
 * free the context and other associated resources.
 *
 * If the index entry was modified, call flush_dcache_index_entry_page()
 * immediately after the modification and either ntfs_index_entry_mark_dirty()
 * or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
 * ensure that the changes are written to disk.
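 *
 * As an illustrative sketch only (this sequence is not code from this file
 * and the process_entry() helper is hypothetical, standing in for whatever
 * the caller does with the matching entry), a caller holding i_mutex on the
 * index inode @idx_ni would typically do:
 *
 *	ictx = ntfs_index_ctx_get(idx_ni);
 *	if (!ictx)
 *		return -ENOMEM;
 *	err = ntfs_index_lookup(key, key_len, ictx);
 *	if (!err)
 *		process_entry(ictx->entry, ictx->data, ictx->data_len);
 *	ntfs_index_ctx_put(ictx);
 *	return err;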
 *
 * Locking:  - Caller must hold i_mutex on the index inode.
 *	     - Each page cache page in the index allocation mapping must be
 *	       locked whilst being accessed, otherwise we may find a corrupt
 *	       page because it may be under ->writepage at the moment, which
 *	       applies the mst protection fixups before writing out and then
 *	       removes them again after the write is complete, after which it
 *	       unlocks the page.
 */
int ntfs_index_lookup(const void *key, const int key_len,
		ntfs_index_context *ictx)
{
	VCN vcn, old_vcn;
	ntfs_inode *idx_ni = ictx->idx_ni;
	ntfs_volume *vol = idx_ni->vol;
	struct super_block *sb = vol->sb;
	ntfs_inode *base_ni = idx_ni->ext.base_ntfs_ino;
	MFT_RECORD *m;
	INDEX_ROOT *ir;
	INDEX_ENTRY *ie;
	INDEX_ALLOCATION *ia;
	u8 *index_end, *kaddr;
	ntfs_attr_search_ctx *actx;
	struct address_space *ia_mapping;
	struct page *page;
	int rc, err = 0;

	ntfs_debug("Entering.");
	BUG_ON(!NInoAttr(idx_ni));
	BUG_ON(idx_ni->type != AT_INDEX_ALLOCATION);
	BUG_ON(idx_ni->nr_extents != -1);
	BUG_ON(!base_ni);
	BUG_ON(!key);
	BUG_ON(key_len <= 0);
	if (!ntfs_is_collation_rule_supported(
			idx_ni->itype.index.collation_rule)) {
		ntfs_error(sb, "Index uses unsupported collation rule 0x%x. "
				"Aborting lookup.", le32_to_cpu(
				idx_ni->itype.index.collation_rule));
		return -EOPNOTSUPP;
	}
	/* Get hold of the mft record for the index inode. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
				-PTR_ERR(m));
		return PTR_ERR(m);
	}
	actx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!actx)) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find the index root attribute in the mft record. */
	err = ntfs_attr_lookup(AT_INDEX_ROOT, idx_ni->name, idx_ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, actx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			ntfs_error(sb, "Index root attribute missing in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
		}
		goto err_out;
	}
	/* Get to the index root value (it has been verified in read_inode). */
	ir = (INDEX_ROOT*)((u8*)actx->attr +
			le16_to_cpu(actx->attr->data.resident.value_offset));
	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ir->index +
			le32_to_cpu(ir->index.entries_offset));
	/*
	 * Loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)actx->mrec || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) > index_end)
			goto idx_err_out;
		/*
		 * The last entry cannot contain a key. It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length))
			goto idx_err_out;
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ir_done:
			ictx->is_in_root = true;
			ictx->ir = ir;
			ictx->actx = actx;
			ictx->base_ni = base_ni;
			ictx->ia = NULL;
			ictx->page = NULL;
done:
			ictx->entry = ie;
			ictx->data = (u8*)ie +
					le16_to_cpu(ie->data.vi.data_offset);
			ictx->data_len = le16_to_cpu(ie->data.vi.data_length);
			ntfs_debug("Done.");
			return err;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ir_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index without success. Check for the
	 * presence of a child node and if not present setup @ictx and return
	 * -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ir_done;
	} /* Child node present, descend into it. */
	/* Consistency check: Verify that an index allocation exists. */
	if (!NInoIndexAllocPresent(idx_ni)) {
		ntfs_error(sb, "No index allocation attribute but index entry "
				"requires one. Inode 0x%lx is corrupt or "
				"driver bug.", idx_ni->mft_no);
		goto err_out;
	}
	/* Get the starting vcn of the index_block holding the child node. */
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	ia_mapping = VFS_I(idx_ni)->i_mapping;
	/*
	 * We are done with the index root and the mft record. Release them,
	 * otherwise we deadlock with ntfs_map_page().
	 */
	ntfs_attr_put_search_ctx(actx);
	unmap_mft_record(base_ni);
	m = NULL;
	actx = NULL;
descend_into_child_node:
	/*
	 * Convert vcn to index into the index allocation attribute in units
	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
	 * disk if necessary.
	 */
	page = ntfs_map_page(ia_mapping, vcn <<
			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page)) {
		ntfs_error(sb, "Failed to map index page, error %ld.",
				-PTR_ERR(page));
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
	/* Get to the index allocation block. */
	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
	/* Bounds checks. */
	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Out of bounds check failed. Corrupt inode "
				"0x%lx or driver bug.", idx_ni->mft_no);
		goto unm_err_out;
	}
	/* Catch multi sector transfer fixup errors. */
	if (unlikely(!ntfs_is_indx_record(ia->magic))) {
		ntfs_error(sb, "Index record with vcn 0x%llx is corrupt. "
				"Corrupt inode 0x%lx. Run chkdsk.",
				(long long)vcn, idx_ni->mft_no);
		goto unm_err_out;
	}
	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx). Inode "
				"0x%lx is corrupt or driver bug.",
				(unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)vcn, idx_ni->mft_no);
		goto unm_err_out;
	}
	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx has "
				"a size (%u) differing from the index "
				"specified size (%u). Inode is corrupt or "
				"driver bug.", (unsigned long long)vcn,
				idx_ni->mft_no,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				idx_ni->itype.index.block_size);
		goto unm_err_out;
	}
	index_end = (u8*)ia + idx_ni->itype.index.block_size;
	if (index_end > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
				"crosses page boundary. Impossible! Cannot "
				"access! This is probably a bug in the "
				"driver.", (unsigned long long)vcn,
				idx_ni->mft_no);
		goto unm_err_out;
	}
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (index_end > (u8*)ia + idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of inode "
				"0x%lx exceeds maximum size.",
				(unsigned long long)vcn, idx_ni->mft_no);
		goto unm_err_out;
	}
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Iterate similar to above big loop but applied to index buffer, thus
	 * loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) > index_end) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			goto unm_err_out;
		}
		/*
		 * The last entry cannot contain a key. It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length)) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			goto unm_err_out;
		}
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ia_done:
			ictx->is_in_root = false;
			ictx->actx = NULL;
			ictx->base_ni = NULL;
			ictx->ia = ia;
			ictx->page = page;
			goto done;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ia_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index buffer without success. Check for
	 * the presence of a child node and if not present return -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ia_done;
	}
	if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
		ntfs_error(sb, "Index entry with child node found in a leaf "
				"node in inode 0x%lx.", idx_ni->mft_no);
		goto unm_err_out;
	}
	/* Child node present, descend into it. */
	old_vcn = vcn;
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	if (vcn >= 0) {
		/*
		 * If vcn is in the same page cache page as old_vcn we recycle
		 * the mapped page.
		 */
		if (old_vcn << vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT == vcn <<
				vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT)
			goto fast_descend_into_child_node;
		unlock_page(page);
		ntfs_unmap_page(page);
		goto descend_into_child_node;
	}
	ntfs_error(sb, "Negative child node vcn in inode 0x%lx.",
			idx_ni->mft_no);
unm_err_out:
	unlock_page(page);
	ntfs_unmap_page(page);
err_out:
	if (!err)
		err = -EIO;
	if (actx)
		ntfs_attr_put_search_ctx(actx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
idx_err_out:
	ntfs_error(sb, "Corrupt index. Aborting lookup.");
	goto err_out;
}