// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Defines the public interface of the disk cache. For more details see
// http://dev.chromium.org/developers/design-documents/network-stack/disk-cache

#ifndef NET_DISK_CACHE_DISK_CACHE_H_
#define NET_DISK_CACHE_DISK_CACHE_H_

#include <string>
#include <vector>

#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/completion_callback.h"
#include "net/base/net_export.h"

namespace base {
class FilePath;
class SingleThreadTaskRunner;
}  // namespace base

namespace net {
class IOBuffer;
class NetLog;
}  // namespace net

namespace disk_cache {

class Entry;
class Backend;

// Returns an instance of a Backend of the given |type|. |path| points to a
// folder where the cached data will be stored (if appropriate). This cache
// instance must be the only object that will be reading or writing files to
// that folder. The returned object should be deleted when not needed anymore.
// If |force| is true, and there is a problem with the cache initialization, the
// files will be deleted and a new set will be created. |max_bytes| is the
// maximum size the cache can grow to. If zero is passed in as |max_bytes|, the
// cache will determine the value to use. |thread| can be used to perform IO
// operations if a dedicated thread is required; a valid value is expected for
// any backend that performs operations on a disk. The returned pointer can be
// NULL if a fatal error is found. The actual return value of the function is a
// net error code. If this function returns ERR_IO_PENDING, the |callback| will
// be invoked when a backend is available or a fatal error condition is reached.
// The pointer to receive the |backend| must remain valid until the operation
// completes (the callback is notified).
NET_EXPORT int CreateCacheBackend(
    net::CacheType type,
    net::BackendType backend_type,
    const base::FilePath& path,
    int max_bytes,
    bool force,
    const scoped_refptr<base::SingleThreadTaskRunner>& thread,
    net::NetLog* net_log,
    scoped_ptr<Backend>* backend,
    const net::CompletionCallback& callback);
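
// Example (illustrative sketch only, not part of this interface): creating a
// blockfile backend for an HTTP cache. |cache_path|, |cache_task_runner| and
// OnBackendCreated() are hypothetical caller-provided names.
//
//   scoped_ptr<disk_cache::Backend> backend;
//   net::CompletionCallback callback = base::Bind(&OnBackendCreated);
//   int rv = disk_cache::CreateCacheBackend(
//       net::DISK_CACHE, net::CACHE_BACKEND_BLOCKFILE, cache_path,
//       0 /* max_bytes: let the cache decide */, true /* force */,
//       cache_task_runner, NULL /* net_log */, &backend, callback);
//   if (rv == net::ERR_IO_PENDING) {
//     // |backend| must stay valid until |callback| runs with the result.
//   } else if (rv == net::OK) {
//     // |backend| is ready to use immediately.
//   }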

// The root interface for a disk cache instance.
class NET_EXPORT Backend {
 public:
  typedef net::CompletionCallback CompletionCallback;

  class Iterator {
   public:
    virtual ~Iterator() {}

    // OpenNextEntry returns |net::OK| and provides |next_entry| if there is an
    // entry to enumerate. It returns |net::ERR_FAILED| at the end of
    // enumeration. If the function returns |net::ERR_IO_PENDING|, then the
    // final result will be passed to the provided |callback|, otherwise
    // |callback| will not be called. If any entry in the cache is modified
    // during iteration, the result of this function is thereafter undefined.
    //
    // Calling OpenNextEntry after the backend which created it is destroyed
    // may fail with |net::ERR_FAILED|; however, it should not crash.
    //
    // Some cache backends make stronger guarantees about mutation during
    // iteration; see the top comment in simple_backend_impl.h for details.
    virtual int OpenNextEntry(Entry** next_entry,
                              const CompletionCallback& callback) = 0;
  };

  // If the backend is destroyed when there are operations in progress (any
  // callback that has not been invoked yet), this method cancels said
  // operations so the callbacks are not invoked, possibly leaving the work
  // half way (for instance, dooming just a few entries). Note that pending IO
  // for a given Entry (as opposed to the Backend) will still generate a
  // callback from within this method.
  virtual ~Backend() {}

  // Returns the type of this cache.
  virtual net::CacheType GetCacheType() const = 0;

  // Returns the number of entries in the cache.
  virtual int32 GetEntryCount() const = 0;

  // Opens an existing entry. Upon success, |entry| holds a pointer to an Entry
  // object representing the specified disk cache entry. When the entry pointer
  // is no longer needed, its Close method should be called. The return value is
  // a net error code. If this method returns ERR_IO_PENDING, the |callback|
  // will be invoked when the entry is available. The pointer to receive the
  // |entry| must remain valid until the operation completes.
  virtual int OpenEntry(const std::string& key, Entry** entry,
                        const CompletionCallback& callback) = 0;

  // Creates a new entry. Upon success, the out param holds a pointer to an
  // Entry object representing the newly created disk cache entry. When the
  // entry pointer is no longer needed, its Close method should be called. The
  // return value is a net error code. If this method returns ERR_IO_PENDING,
  // the |callback| will be invoked when the entry is available. The pointer to
  // receive the |entry| must remain valid until the operation completes.
  virtual int CreateEntry(const std::string& key, Entry** entry,
                          const CompletionCallback& callback) = 0;
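
  // Example (illustrative sketch only): opening an entry by key and falling
  // back to creating it if it does not exist. |backend|, |key| and the
  // callbacks are hypothetical caller-provided names; full error and
  // ERR_IO_PENDING handling is elided.
  //
  //   disk_cache::Entry* entry = NULL;
  //   int rv = backend->OpenEntry(key, &entry, base::Bind(&OnEntryOpened));
  //   if (rv == net::ERR_FAILED) {
  //     // No such entry yet; try to create it instead.
  //     rv = backend->CreateEntry(key, &entry, base::Bind(&OnEntryCreated));
  //   }
  //   // On net::ERR_IO_PENDING, |entry| must remain valid until the callback
  //   // runs; on net::OK, |entry| is usable now and must be Close()d later.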

  // Marks the entry, specified by the given key, for deletion. The return value
  // is a net error code. If this method returns ERR_IO_PENDING, the |callback|
  // will be invoked after the entry is doomed.
  virtual int DoomEntry(const std::string& key,
                        const CompletionCallback& callback) = 0;

  // Marks all entries for deletion. The return value is a net error code. If
  // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
  // operation completes.
  virtual int DoomAllEntries(const CompletionCallback& callback) = 0;

  // Marks a range of entries for deletion. This supports unbounded deletes in
  // either direction by using null Time values for either argument. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time < |end_time| are deleted.
  virtual int DoomEntriesBetween(base::Time initial_time,
                                 base::Time end_time,
                                 const CompletionCallback& callback) = 0;
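
  // Example (illustrative sketch only): dooming everything accessed in the
  // last hour, e.g. when the user clears recent browsing data. The callback
  // name is hypothetical.
  //
  //   base::Time end_time = base::Time::Now();
  //   base::Time initial_time = end_time - base::TimeDelta::FromHours(1);
  //   int rv = backend->DoomEntriesBetween(initial_time, end_time,
  //                                        base::Bind(&OnEntriesDoomed));
  //   // rv is net::OK, a net error, or net::ERR_IO_PENDING (the callback runs
  //   // later with the final result).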

  // Marks all entries accessed since |initial_time| for deletion. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time are deleted.
  virtual int DoomEntriesSince(base::Time initial_time,
                               const CompletionCallback& callback) = 0;

  // Returns an iterator which will enumerate all entries of the cache in an
  // undefined order.
  virtual scoped_ptr<Iterator> CreateIterator() = 0;
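
  // Example (illustrative sketch only): a synchronous-style enumeration loop.
  // A real caller must also handle net::ERR_IO_PENDING by waiting for the
  // callback; that part is elided and ProcessEntry()/OnNextEntry() are
  // hypothetical helpers.
  //
  //   scoped_ptr<disk_cache::Backend::Iterator> iter =
  //       backend->CreateIterator();
  //   disk_cache::Entry* entry = NULL;
  //   while (iter->OpenNextEntry(&entry, base::Bind(&OnNextEntry)) ==
  //          net::OK) {
  //     ProcessEntry(entry);
  //     entry->Close();
  //     entry = NULL;
  //   }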

  // Returns a list of cache statistics.
  virtual void GetStats(
      std::vector<std::pair<std::string, std::string> >* stats) = 0;

  // Called whenever an external cache in the system reuses the resource
  // referred to by |key|.
  virtual void OnExternalCacheHit(const std::string& key) = 0;
};

// This interface represents an entry in the disk cache.
class NET_EXPORT Entry {
 public:
  typedef net::CompletionCallback CompletionCallback;
  typedef net::IOBuffer IOBuffer;

  // Marks this cache entry for deletion.
  virtual void Doom() = 0;

  // Releases this entry. Calling this method does not cancel pending IO
  // operations on this entry. Even after the last reference to this object has
  // been released, pending completion callbacks may be invoked.
  virtual void Close() = 0;

  // Returns the key associated with this cache entry.
  virtual std::string GetKey() const = 0;

  // Returns the time when this cache entry was last used.
  virtual base::Time GetLastUsed() const = 0;

  // Returns the time when this cache entry was last modified.
  virtual base::Time GetLastModified() const = 0;

  // Returns the size of the cache data with the given index.
  virtual int32 GetDataSize(int index) const = 0;

  // Copies cached data into the given buffer of length |buf_len|. Returns the
  // number of bytes read or a network error code. If this function returns
  // ERR_IO_PENDING, the completion callback will be called on the current
  // thread when the operation completes, and a reference to |buf| will be
  // retained until the callback is called. Note that as long as the function
  // does not complete immediately, the callback will always be invoked, even
  // after Close has been called; in other words, the caller may close this
  // entry without having to wait for all the callbacks, and still rely on the
  // cleanup performed from the callback code.
  virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                       const CompletionCallback& callback) = 0;

  // Copies data from the given buffer of length |buf_len| into the cache.
  // Returns the number of bytes written or a network error code. If this
  // function returns ERR_IO_PENDING, the completion callback will be called
  // on the current thread when the operation completes, and a reference to
  // |buf| will be retained until the callback is called. Note that as long as
  // the function does not complete immediately, the callback will always be
  // invoked, even after Close has been called; in other words, the caller may
  // close this entry without having to wait for all the callbacks, and still
  // rely on the cleanup performed from the callback code.
  // If truncate is true, this call will truncate the stored data at the end of
  // what we are writing here.
  virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback,
                        bool truncate) = 0;
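
  // Example (illustrative sketch only): writing a short string to stream 1 of
  // an entry and reading it back. |entry| and the callbacks are hypothetical;
  // ERR_IO_PENDING handling is elided.
  //
  //   const int kSize = 5;
  //   scoped_refptr<net::IOBuffer> write_buf = new net::StringIOBuffer("hello");
  //   int rv = entry->WriteData(1, 0, write_buf.get(), kSize,
  //                             base::Bind(&OnWriteDone), true /* truncate */);
  //
  //   scoped_refptr<net::IOBuffer> read_buf = new net::IOBuffer(kSize);
  //   rv = entry->ReadData(1, 0, read_buf.get(), kSize,
  //                        base::Bind(&OnReadDone));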

  // Sparse entries support:
  //
  // A Backend implementation can support sparse entries, so the cache keeps
  // track of which parts of the entry have been written before. The backend
  // will never return data that was not written previously, so reading from
  // such a region will return 0 bytes read (or actually the number of bytes
  // read before reaching that region).
  //
  // There are only two streams for sparse entries: a regular control stream
  // (index 0) that must be accessed through the regular API (ReadData and
  // WriteData), and one sparse stream that must be accessed through the sparse-
  // aware API that follows. Calling a non-sparse aware method with an index
  // argument other than 0 is a mistake that results in implementation specific
  // behavior. Using a sparse-aware method with an entry that was not stored
  // using the same API, or with a backend that doesn't support sparse entries
  // will return ERR_CACHE_OPERATION_NOT_SUPPORTED.
  //
  // The storage granularity of the implementation should be at least 1 KB. In
  // other words, storing less than 1 KB may result in an implementation
  // dropping the data completely, and writing at offsets not aligned with 1 KB,
  // or with lengths not a multiple of 1 KB may result in the first or last part
  // of the data being discarded. However, two consecutive writes should not
  // result in a hole in between the two parts as long as they are sequential
  // (the second one starts where the first one ended), and there is no other
  // write between them.
  //
  // The Backend implementation is free to evict any range from the cache at any
  // moment, so in practice, the previously stated granularity of 1 KB is not
  // as bad as it sounds.
  //
  // The sparse methods don't support multiple simultaneous IO operations to the
  // same physical entry, so in practice a single object should be instantiated
  // for a given key at any given time. Once an operation has been issued, the
  // caller should wait until it completes before starting another one. This
  // requirement includes the case when an entry is closed while some operation
  // is in progress and another object is instantiated; any IO operation will
  // fail while the previous operation is still in-flight. In order to deal with
  // this requirement, the caller could either wait until the operation
  // completes before closing the entry, or call CancelSparseIO() before closing
  // the entry, and call ReadyForSparseIO() on the new entry and wait for the
  // callback before issuing new operations.

  // Behaves like ReadData() except that this method is used to access sparse
  // entries.
  virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback) = 0;

  // Behaves like WriteData() except that this method is used to access sparse
  // entries. |truncate| is not part of this interface because a sparse entry
  // is not expected to be reused with new data. To delete the old data and
  // start again, or to reduce the total size of the stream data (which implies
  // that the content has changed), the whole entry should be doomed and
  // re-created.
  virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) = 0;

  // Returns information about the currently stored portion of a sparse entry.
  // |offset| and |len| describe a particular range that should be scanned to
  // find out if it is stored or not. |start| will contain the offset of the
  // first byte that is stored within this range, and the return value is the
  // minimum number of consecutive stored bytes. Note that it is possible that
  // this entry has stored more than the returned value. This method returns a
  // net error code whenever the request cannot be completed successfully. If
  // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
  // operation completes, and |start| must remain valid until that point.
  virtual int GetAvailableRange(int64 offset, int len, int64* start,
                                const CompletionCallback& callback) = 0;
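
  // Example (illustrative sketch only): writing a chunk at a large offset of a
  // sparse entry and then asking which part of a range is actually stored.
  // |entry|, |buf| and the callbacks are hypothetical; ERR_IO_PENDING handling
  // is elided.
  //
  //   entry->WriteSparseData(1 << 20, buf.get(), 4096,
  //                          base::Bind(&OnSparseWrite));
  //
  //   int64 start = 0;
  //   int rv = entry->GetAvailableRange(0, 2 << 20, &start,
  //                                     base::Bind(&OnRangeInfo));
  //   // On success, |start| is the first stored byte in the scanned range and
  //   // |rv| is the number of consecutive stored bytes beginning there.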

  // Returns true if this entry could be a sparse entry or false otherwise. This
  // is a quick test that may return true even if the entry is not really
  // sparse. This method doesn't modify the state of this entry (it will not
  // create sparse tracking data). GetAvailableRange or ReadSparseData can be
  // used to perform a definitive test of whether an existing entry is sparse or
  // not, but that method may modify the current state of the entry (making it
  // sparse, for instance). The purpose of this method is to test an existing
  // entry, but without generating actual IO to perform a thorough check.
  virtual bool CouldBeSparse() const = 0;

  // Cancels any pending sparse IO operation (if any). The completion callback
  // of the operation in question will still be called when the operation
  // finishes, but the operation will finish sooner when this method is used.
  virtual void CancelSparseIO() = 0;

  // Returns OK if this entry can be used immediately. If that is not the
  // case, returns ERR_IO_PENDING and invokes the provided callback when this
  // entry is ready to use. This method always returns OK for non-sparse
  // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
  // (by calling CancelSparseIO), but the cache is still busy with it. If there
  // is a pending operation that has not been cancelled, this method will return
  // OK although another IO operation cannot be issued at this time; in this
  // case the caller should just wait for the regular callback to be invoked
  // instead of using this method to provide another callback.
  //
  // Note that CancelSparseIO may have been called on another instance of this
  // object that refers to the same physical disk entry.
  // Note: This method is deprecated.
  virtual int ReadyForSparseIO(const CompletionCallback& callback) = 0;

 protected:
  virtual ~Entry() {}
};

struct EntryDeleter {
  void operator()(Entry* entry) {
    // Note that |entry| is ref-counted.
    entry->Close();
  }
};

// Automatically closes an entry when it goes out of scope.
typedef scoped_ptr<Entry, EntryDeleter> ScopedEntryPtr;
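
// Example (illustrative sketch only): using ScopedEntryPtr so a successfully
// opened entry is closed automatically. |backend|, |key| and the callback are
// hypothetical caller-provided names.
//
//   disk_cache::Entry* entry = NULL;
//   if (backend->OpenEntry(key, &entry, base::Bind(&OnOpened)) == net::OK) {
//     disk_cache::ScopedEntryPtr scoped_entry(entry);
//     // ... use scoped_entry.get(); Close() runs when it goes out of scope.
//   }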

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_DISK_CACHE_H_