// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Defines the public interface of the disk cache. For more details see
// http://dev.chromium.org/developers/design-documents/network-stack/disk-cache

#ifndef NET_DISK_CACHE_DISK_CACHE_H_
#define NET_DISK_CACHE_DISK_CACHE_H_

#include <string>
#include <vector>

#include "base/basictypes.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_split.h"
#include "base/time/time.h"
#include "net/base/cache_type.h"
#include "net/base/completion_callback.h"
#include "net/base/net_export.h"

namespace base {
class FilePath;
class SingleThreadTaskRunner;
}  // namespace base

namespace net {
class IOBuffer;
class NetLog;
}  // namespace net

namespace disk_cache {

class Entry;
class Backend;

// Returns an instance of a Backend of the given |type|. |path| points to a
// folder where the cached data will be stored (if appropriate). This cache
// instance must be the only object that will be reading or writing files to
// that folder. The returned object should be deleted when not needed anymore.
// If |force| is true, and there is a problem with the cache initialization, the
// files will be deleted and a new set will be created. |max_bytes| is the
// maximum size the cache can grow to. If zero is passed in as |max_bytes|, the
// cache will determine the value to use. |thread| can be used to perform IO
// operations if a dedicated thread is required; a valid value is expected for
// any backend that performs operations on a disk. The returned pointer can be
// NULL if a fatal error is found. The actual return value of the function is a
// net error code. If this function returns ERR_IO_PENDING, the |callback| will
// be invoked when a backend is available or a fatal error condition is reached.
// The pointer to receive the |backend| must remain valid until the operation
// completes (the callback is notified).
NET_EXPORT int CreateCacheBackend(
    net::CacheType type,
    net::BackendType backend_type,
    const base::FilePath& path,
    int max_bytes,
    bool force,
    const scoped_refptr<base::SingleThreadTaskRunner>& thread,
    net::NetLog* net_log,
    scoped_ptr<Backend>* backend,
    const net::CompletionCallback& callback);
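
// A minimal usage sketch, not part of this header. The |cache_path| and
// |cache_thread| variables and the OnBackendCreated(int result) function are
// hypothetical caller-side names:
//
//   scoped_ptr<disk_cache::Backend> backend;
//   int rv = disk_cache::CreateCacheBackend(
//       net::DISK_CACHE, net::CACHE_BACKEND_DEFAULT, cache_path,
//       0,      // Let the cache choose its maximum size.
//       false,  // Do not wipe the files on initialization errors.
//       cache_thread, NULL, &backend, base::Bind(&OnBackendCreated));
//   if (rv != net::ERR_IO_PENDING)
//     OnBackendCreated(rv);  // The operation completed synchronously.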

// The root interface for a disk cache instance.
class NET_EXPORT Backend {
 public:
  typedef net::CompletionCallback CompletionCallback;

  class Iterator {
   public:
    virtual ~Iterator() {}

    // OpenNextEntry returns |net::OK| and provides |next_entry| if there is an
    // entry to enumerate. It returns |net::ERR_FAILED| at the end of
    // enumeration. If the function returns |net::ERR_IO_PENDING|, then the
    // final result will be passed to the provided |callback|, otherwise
    // |callback| will not be called. If any entry in the cache is modified
    // during iteration, the result of this function is thereafter undefined.
    //
    // Calling OpenNextEntry after the backend that created this iterator has
    // been destroyed may fail with |net::ERR_FAILED|; however, it should not
    // crash.
    //
    // Some cache backends make stronger guarantees about mutation during
    // iteration; see the top comment in simple_backend_impl.h for details.
    virtual int OpenNextEntry(Entry** next_entry,
                              const CompletionCallback& callback) = 0;
  };
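
  // Illustrative enumeration sketch, not part of the interface. It assumes a
  // |backend| pointer and a |callback| supplied by the caller, and that every
  // OpenNextEntry() call completes synchronously; real code must also handle
  // net::ERR_IO_PENDING through |callback|:
  //
  //   scoped_ptr<Backend::Iterator> iter = backend->CreateIterator();
  //   Entry* entry = NULL;
  //   while (iter->OpenNextEntry(&entry, callback) == net::OK) {
  //     VLOG(1) << "cached key: " << entry->GetKey();
  //     entry->Close();
  //   }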

  // If the backend is destroyed when there are operations in progress (any
  // callback that has not been invoked yet), this method cancels said
  // operations so the callbacks are not invoked, possibly leaving the work
  // half way (for instance, dooming just a few entries). Note that pending IO
  // for a given Entry (as opposed to the Backend) will still generate a
  // callback from within this method.
  virtual ~Backend() {}

  // Returns the type of this cache.
  virtual net::CacheType GetCacheType() const = 0;

  // Returns the number of entries in the cache.
  virtual int32 GetEntryCount() const = 0;

  // Opens an existing entry. Upon success, |entry| holds a pointer to an Entry
  // object representing the specified disk cache entry. When the entry pointer
  // is no longer needed, its Close method should be called. The return value is
  // a net error code. If this method returns ERR_IO_PENDING, the |callback|
  // will be invoked when the entry is available. The pointer to receive the
  // |entry| must remain valid until the operation completes.
  virtual int OpenEntry(const std::string& key, Entry** entry,
                        const CompletionCallback& callback) = 0;

  // Creates a new entry. Upon success, the out param holds a pointer to an
  // Entry object representing the newly created disk cache entry. When the
  // entry pointer is no longer needed, its Close method should be called. The
  // return value is a net error code. If this method returns ERR_IO_PENDING,
  // the |callback| will be invoked when the entry is available. The pointer to
  // receive the |entry| must remain valid until the operation completes.
  virtual int CreateEntry(const std::string& key, Entry** entry,
                          const CompletionCallback& callback) = 0;
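
  // Simplified open-or-create sketch, not part of the interface. |backend|,
  // |key| and OnEntryAvailable(int result) are hypothetical caller-side names,
  // and only synchronous completion is shown:
  //
  //   Entry* entry = NULL;
  //   int rv = backend->OpenEntry(key, &entry, base::Bind(&OnEntryAvailable));
  //   if (rv == net::ERR_FAILED)
  //     rv = backend->CreateEntry(key, &entry, base::Bind(&OnEntryAvailable));
  //   if (rv != net::ERR_IO_PENDING)
  //     OnEntryAvailable(rv);  // |entry| is valid here when rv == net::OK.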

  // Marks the entry, specified by the given key, for deletion. The return value
  // is a net error code. If this method returns ERR_IO_PENDING, the |callback|
  // will be invoked after the entry is doomed.
  virtual int DoomEntry(const std::string& key,
                        const CompletionCallback& callback) = 0;

  // Marks all entries for deletion. The return value is a net error code. If
  // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
  // operation completes.
  virtual int DoomAllEntries(const CompletionCallback& callback) = 0;

  // Marks a range of entries for deletion. This supports unbounded deletes in
  // either direction by using null Time values for either argument. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time < |end_time| are deleted.
  virtual int DoomEntriesBetween(base::Time initial_time,
                                 base::Time end_time,
                                 const CompletionCallback& callback) = 0;
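
  // Sketch: dooming everything accessed in the last hour (|backend| and
  // |callback| are hypothetical caller-side names):
  //
  //   int rv = backend->DoomEntriesBetween(
  //       base::Time::Now() - base::TimeDelta::FromHours(1),
  //       base::Time(),  // Null end time: unbounded towards the future.
  //       callback);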

  // Marks all entries accessed since |initial_time| for deletion. The return
  // value is a net error code. If this method returns ERR_IO_PENDING, the
  // |callback| will be invoked when the operation completes.
  // Entries with |initial_time| <= access time are deleted.
  virtual int DoomEntriesSince(base::Time initial_time,
                               const CompletionCallback& callback) = 0;

  // Returns an iterator which will enumerate all entries of the cache in an
  // undefined order.
  virtual scoped_ptr<Iterator> CreateIterator() = 0;

  // Returns a list of cache statistics.
  virtual void GetStats(base::StringPairs* stats) = 0;

  // Called whenever an external cache in the system reuses the resource
  // referred to by |key|.
  virtual void OnExternalCacheHit(const std::string& key) = 0;
};

// This interface represents an entry in the disk cache.
class NET_EXPORT Entry {
 public:
  typedef net::CompletionCallback CompletionCallback;
  typedef net::IOBuffer IOBuffer;

  // Marks this cache entry for deletion.
  virtual void Doom() = 0;

  // Releases this entry. Calling this method does not cancel pending IO
  // operations on this entry. Even after the last reference to this object has
  // been released, pending completion callbacks may be invoked.
  virtual void Close() = 0;

  // Returns the key associated with this cache entry.
  virtual std::string GetKey() const = 0;

  // Returns the time when this cache entry was last used.
  virtual base::Time GetLastUsed() const = 0;

  // Returns the time when this cache entry was last modified.
  virtual base::Time GetLastModified() const = 0;

  // Returns the size of the cache data with the given index.
  virtual int32 GetDataSize(int index) const = 0;

  // Copies cached data into the given buffer of length |buf_len|. Returns the
  // number of bytes read or a network error code. If this function returns
  // ERR_IO_PENDING, the completion callback will be called on the current
  // thread when the operation completes, and a reference to |buf| will be
  // retained until the callback is called. Note that as long as the function
  // does not complete immediately, the callback will always be invoked, even
  // after Close has been called; in other words, the caller may close this
  // entry without having to wait for all the callbacks, and still rely on the
  // cleanup performed from the callback code.
  virtual int ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                       const CompletionCallback& callback) = 0;

  // Copies data from the given buffer of length |buf_len| into the cache.
  // Returns the number of bytes written or a network error code. If this
  // function returns ERR_IO_PENDING, the completion callback will be called
  // on the current thread when the operation completes, and a reference to
  // |buf| will be retained until the callback is called. Note that as long as
  // the function does not complete immediately, the callback will always be
  // invoked, even after Close has been called; in other words, the caller may
  // close this entry without having to wait for all the callbacks, and still
  // rely on the cleanup performed from the callback code.
  // If |truncate| is true, this call will truncate the stored data at the end
  // of what we are writing here.
  virtual int WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback,
                        bool truncate) = 0;
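
  // Illustrative sketch of writing and then reading back stream 1; |entry| and
  // |callback| are hypothetical caller-side names, and net::StringIOBuffer is
  // just one convenient choice of buffer:
  //
  //   scoped_refptr<net::StringIOBuffer> out(new net::StringIOBuffer("hello"));
  //   entry->WriteData(1, 0, out.get(), out->size(), callback, true);
  //
  //   scoped_refptr<net::IOBuffer> in(new net::IOBuffer(1024));
  //   int rv = entry->ReadData(1, 0, in.get(), 1024, callback);
  //   // |rv| is a byte count, a net error code, or net::ERR_IO_PENDING.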

  // Sparse entries support:
  //
  // A Backend implementation can support sparse entries, so the cache keeps
  // track of which parts of the entry have been written before. The backend
  // will never return data that was not written previously, so reading from
  // such a region will return 0 bytes read (or actually the number of bytes
  // read before reaching that region).
  //
  // There are only two streams for sparse entries: a regular control stream
  // (index 0) that must be accessed through the regular API (ReadData and
  // WriteData), and one sparse stream that must be accessed through the sparse-
  // aware API that follows. Calling a non-sparse aware method with an index
  // argument other than 0 is a mistake that results in implementation specific
  // behavior. Using a sparse-aware method with an entry that was not stored
  // using the same API, or with a backend that doesn't support sparse entries
  // will return ERR_CACHE_OPERATION_NOT_SUPPORTED.
  //
  // The storage granularity of the implementation should be at least 1 KB. In
  // other words, storing less than 1 KB may result in an implementation
  // dropping the data completely, and writing at offsets not aligned with 1 KB,
  // or with lengths not a multiple of 1 KB may result in the first or last part
  // of the data being discarded. However, two consecutive writes should not
  // result in a hole in between the two parts as long as they are sequential
  // (the second one starts where the first one ended), and there is no other
  // write between them.
  //
  // The Backend implementation is free to evict any range from the cache at any
  // moment, so in practice, the previously stated granularity of 1 KB is not
  // as bad as it sounds.
  //
  // The sparse methods don't support multiple simultaneous IO operations to the
  // same physical entry, so in practice a single object should be instantiated
  // for a given key at any given time. Once an operation has been issued, the
  // caller should wait until it completes before starting another one. This
  // requirement includes the case when an entry is closed while some operation
  // is in progress and another object is instantiated; any IO operation will
  // fail while the previous operation is still in-flight. In order to deal with
  // this requirement, the caller could either wait until the operation
  // completes before closing the entry, or call CancelSparseIO() before closing
  // the entry, and call ReadyForSparseIO() on the new entry and wait for the
  // callback before issuing new operations.

  // Behaves like ReadData() except that this method is used to access sparse
  // entries.
  virtual int ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback) = 0;

  // Behaves like WriteData() except that this method is used to access sparse
  // entries. |truncate| is not part of this interface because a sparse entry
  // is not expected to be reused with new data. To delete the old data and
  // start again, or to reduce the total size of the stream data (which implies
  // that the content has changed), the whole entry should be doomed and
  // re-created.
  virtual int WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) = 0;

  // Returns information about the currently stored portion of a sparse entry.
  // |offset| and |len| describe a particular range that should be scanned to
  // find out if it is stored or not. |start| will contain the offset of the
  // first byte that is stored within this range, and the return value is the
  // minimum number of consecutive stored bytes. Note that it is possible that
  // this entry has stored more than the returned value. This method returns a
  // net error code whenever the request cannot be completed successfully. If
  // this method returns ERR_IO_PENDING, the |callback| will be invoked when the
  // operation completes, and |start| must remain valid until that point.
  virtual int GetAvailableRange(int64 offset, int len, int64* start,
                                const CompletionCallback& callback) = 0;
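
  // Sketch of probing a sparse entry after one sparse write; |entry|, |buf|
  // and |callback| are hypothetical caller-side names and the sizes are
  // arbitrary:
  //
  //   entry->WriteSparseData(10 * 1024, buf, 4096, callback);
  //   ...
  //   int64 start = 0;
  //   int rv = entry->GetAvailableRange(0, 64 * 1024, &start, callback);
  //   // On completion, |start| should be 10 * 1024 and the result at least
  //   // 4096, assuming nothing else was stored in that range.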

  // Returns true if this entry could be a sparse entry or false otherwise. This
  // is a quick test that may return true even if the entry is not really
  // sparse. This method doesn't modify the state of this entry (it will not
  // create sparse tracking data). GetAvailableRange or ReadSparseData can be
  // used to perform a definitive test of whether an existing entry is sparse or
  // not, but that method may modify the current state of the entry (making it
  // sparse, for instance). The purpose of this method is to test an existing
  // entry, but without generating actual IO to perform a thorough check.
  virtual bool CouldBeSparse() const = 0;

  // Cancels the pending sparse IO operation (if any). The completion callback
  // of the operation in question will still be called when the operation
  // finishes, but the operation will finish sooner when this method is used.
  virtual void CancelSparseIO() = 0;

  // Returns OK if this entry can be used immediately. If that is not the
  // case, returns ERR_IO_PENDING and invokes the provided callback when this
  // entry is ready to use. This method always returns OK for non-sparse
  // entries, and returns ERR_IO_PENDING when a previous operation was cancelled
  // (by calling CancelSparseIO), but the cache is still busy with it. If there
  // is a pending operation that has not been cancelled, this method will return
  // OK although another IO operation cannot be issued at this time; in this
  // case the caller should just wait for the regular callback to be invoked
  // instead of using this method to provide another callback.
  //
  // Note that CancelSparseIO may have been called on another instance of this
  // object that refers to the same physical disk entry.
  // Note: This method is deprecated.
  virtual int ReadyForSparseIO(const CompletionCallback& callback) = 0;

 protected:
  virtual ~Entry() {}
};

struct EntryDeleter {
  void operator()(Entry* entry) {
    // Note that |entry| is ref-counted.
    entry->Close();
  }
};

// Automatically closes an entry when it goes out of scope.
typedef scoped_ptr<Entry, EntryDeleter> ScopedEntryPtr;
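
// Illustrative sketch, not part of this header; |backend|, |key| and
// |callback| are hypothetical caller-side names and synchronous completion is
// assumed:
//
//   Entry* raw_entry = NULL;
//   if (backend->OpenEntry(key, &raw_entry, callback) == net::OK) {
//     ScopedEntryPtr entry(raw_entry);
//     ...  // Use entry->ReadData(), etc.
//   }  // entry->Close() is called here by EntryDeleter.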

}  // namespace disk_cache

#endif  // NET_DISK_CACHE_DISK_CACHE_H_