/*
 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "platform/SharedBuffer.h"

#include "wtf/text/UTF8.h"
#include "wtf/text/Unicode.h"

#undef SHARED_BUFFER_STATS

#ifdef SHARED_BUFFER_STATS
#include "public/platform/Platform.h"
#include "public/platform/WebTraceLocation.h"
#include "wtf/DataLog.h"
#endif
43 static inline unsigned segmentIndex(unsigned position
)
45 return position
/ SharedBuffer::kSegmentSize
;
48 static inline unsigned offsetInSegment(unsigned position
)
50 return position
% SharedBuffer::kSegmentSize
;
53 static inline char* allocateSegment()
55 return static_cast<char*>(fastMalloc(SharedBuffer::kSegmentSize
));
58 static inline void freeSegment(char* p
)
63 #ifdef SHARED_BUFFER_STATS
65 static Mutex
& statsMutex()
67 DEFINE_STATIC_LOCAL(Mutex
, mutex
, ());
71 static HashSet
<SharedBuffer
*>& liveBuffers()
73 DEFINE_STATIC_LOCAL(HashSet
<SharedBuffer
*>, buffers
, ());
77 static bool sizeComparator(SharedBuffer
* a
, SharedBuffer
* b
)
79 return a
->size() > b
->size();
82 static CString
snippetForBuffer(SharedBuffer
* sharedBuffer
)
84 const unsigned kMaxSnippetLength
= 64;
86 unsigned snippetLength
= std::min(sharedBuffer
->size(), kMaxSnippetLength
);
87 CString result
= CString::newUninitialized(snippetLength
, snippet
);
91 while (unsigned segmentLength
= sharedBuffer
->getSomeData(segment
, offset
)) {
92 unsigned length
= std::min(segmentLength
, snippetLength
- offset
);
93 memcpy(snippet
+ offset
, segment
, length
);
94 offset
+= segmentLength
;
95 if (offset
>= snippetLength
)
99 for (unsigned i
= 0; i
< snippetLength
; ++i
) {
100 if (!isASCIIPrintable(snippet
[i
]))
107 static void printStats()
109 MutexLocker
locker(statsMutex());
110 Vector
<SharedBuffer
*> buffers
;
111 for (HashSet
<SharedBuffer
*>::const_iterator iter
= liveBuffers().begin(); iter
!= liveBuffers().end(); ++iter
)
112 buffers
.append(*iter
);
113 std::sort(buffers
.begin(), buffers
.end(), sizeComparator
);
115 dataLogF("---- Shared Buffer Stats ----\n");
116 for (size_t i
= 0; i
< buffers
.size() && i
< 64; ++i
) {
117 CString snippet
= snippetForBuffer(buffers
[i
]);
118 dataLogF("Buffer size=%8u %s\n", buffers
[i
]->size(), snippet
.data());
122 static void didCreateSharedBuffer(SharedBuffer
* buffer
)
124 MutexLocker
locker(statsMutex());
125 liveBuffers().add(buffer
);
127 Platform::current()->mainThread()->postTask(FROM_HERE
, bind(&printStats
));
130 static void willDestroySharedBuffer(SharedBuffer
* buffer
)
132 MutexLocker
locker(statsMutex());
133 liveBuffers().remove(buffer
);
138 SharedBuffer::SharedBuffer()
140 , m_buffer(PurgeableVector::NotPurgeable
)
142 #ifdef SHARED_BUFFER_STATS
143 didCreateSharedBuffer(this);
147 SharedBuffer::SharedBuffer(size_t size
)
149 , m_buffer(PurgeableVector::NotPurgeable
)
151 m_buffer
.reserveCapacity(size
);
153 #ifdef SHARED_BUFFER_STATS
154 didCreateSharedBuffer(this);
158 SharedBuffer::SharedBuffer(const char* data
, int size
)
160 , m_buffer(PurgeableVector::NotPurgeable
)
162 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
168 #ifdef SHARED_BUFFER_STATS
169 didCreateSharedBuffer(this);
173 SharedBuffer::SharedBuffer(const char* data
, unsigned size
, PurgeableVector::PurgeableOption purgeable
)
175 , m_buffer(purgeable
)
179 #ifdef SHARED_BUFFER_STATS
180 didCreateSharedBuffer(this);
184 SharedBuffer::SharedBuffer(const unsigned char* data
, int size
)
186 , m_buffer(PurgeableVector::NotPurgeable
)
188 // FIXME: Use unsigned consistently, and check for invalid casts when calling into SharedBuffer from other code.
192 append(reinterpret_cast<const char*>(data
), size
);
194 #ifdef SHARED_BUFFER_STATS
195 didCreateSharedBuffer(this);
199 SharedBuffer::~SharedBuffer()
203 #ifdef SHARED_BUFFER_STATS
204 willDestroySharedBuffer(this);
208 PassRefPtr
<SharedBuffer
> SharedBuffer::adoptVector(Vector
<char>& vector
)
210 RefPtr
<SharedBuffer
> buffer
= create();
211 buffer
->m_buffer
.adopt(vector
);
212 buffer
->m_size
= buffer
->m_buffer
.size();
213 return buffer
.release();
216 unsigned SharedBuffer::size() const
221 const char* SharedBuffer::data() const
223 mergeSegmentsIntoBuffer();
224 return m_buffer
.data();
227 void SharedBuffer::append(PassRefPtr
<SharedBuffer
> data
)
231 while (size_t length
= data
->getSomeData(segment
, position
)) {
232 append(segment
, length
);
237 void SharedBuffer::append(const char* data
, unsigned length
)
243 ASSERT(m_size
>= m_buffer
.size());
244 unsigned positionInSegment
= offsetInSegment(m_size
- m_buffer
.size());
247 if (m_size
<= kSegmentSize
) {
248 // No need to use segments for small resource data.
249 m_buffer
.append(data
, length
);
254 if (!positionInSegment
) {
255 segment
= allocateSegment();
256 m_segments
.append(segment
);
258 segment
= m_segments
.last() + positionInSegment
;
260 unsigned segmentFreeSpace
= kSegmentSize
- positionInSegment
;
261 unsigned bytesToCopy
= std::min(length
, segmentFreeSpace
);
264 memcpy(segment
, data
, bytesToCopy
);
265 if (static_cast<unsigned>(length
) == bytesToCopy
)
268 length
-= bytesToCopy
;
270 segment
= allocateSegment();
271 m_segments
.append(segment
);
272 bytesToCopy
= std::min(length
, static_cast<unsigned>(kSegmentSize
));
276 void SharedBuffer::append(const Vector
<char>& data
)
278 append(data
.data(), data
.size());
281 void SharedBuffer::clear()
283 for (unsigned i
= 0; i
< m_segments
.size(); ++i
)
284 freeSegment(m_segments
[i
]);
291 PassRefPtr
<SharedBuffer
> SharedBuffer::copy() const
293 RefPtr
<SharedBuffer
> clone(adoptRef(new SharedBuffer
));
294 clone
->m_size
= m_size
;
295 clone
->m_buffer
.reserveCapacity(m_size
);
296 clone
->m_buffer
.append(m_buffer
.data(), m_buffer
.size());
297 if (!m_segments
.isEmpty()) {
298 const char* segment
= 0;
299 unsigned position
= m_buffer
.size();
300 while (unsigned segmentSize
= getSomeData(segment
, position
)) {
301 clone
->m_buffer
.append(segment
, segmentSize
);
302 position
+= segmentSize
;
304 ASSERT(position
== clone
->size());
306 return clone
.release();
309 void SharedBuffer::mergeSegmentsIntoBuffer() const
311 unsigned bufferSize
= m_buffer
.size();
312 if (m_size
> bufferSize
) {
313 unsigned bytesLeft
= m_size
- bufferSize
;
314 for (unsigned i
= 0; i
< m_segments
.size(); ++i
) {
315 unsigned bytesToCopy
= std::min(bytesLeft
, static_cast<unsigned>(kSegmentSize
));
316 m_buffer
.append(m_segments
[i
], bytesToCopy
);
317 bytesLeft
-= bytesToCopy
;
318 freeSegment(m_segments
[i
]);
324 unsigned SharedBuffer::getSomeData(const char*& someData
, unsigned position
) const
327 unsigned totalSize
= size();
328 if (position
>= totalSize
) {
333 ASSERT_WITH_SECURITY_IMPLICATION(position
< m_size
);
334 unsigned consecutiveSize
= m_buffer
.size();
335 if (position
< consecutiveSize
) {
336 someData
= m_buffer
.data() + position
;
337 return consecutiveSize
- position
;
340 position
-= consecutiveSize
;
341 unsigned segments
= m_segments
.size();
342 unsigned maxSegmentedSize
= segments
* kSegmentSize
;
343 unsigned segment
= segmentIndex(position
);
344 if (segment
< segments
) {
345 unsigned bytesLeft
= totalSize
- consecutiveSize
;
346 unsigned segmentedSize
= std::min(maxSegmentedSize
, bytesLeft
);
348 unsigned positionInSegment
= offsetInSegment(position
);
349 someData
= m_segments
[segment
] + positionInSegment
;
350 return segment
== segments
- 1 ? segmentedSize
- position
: kSegmentSize
- positionInSegment
;
352 ASSERT_NOT_REACHED();
356 bool SharedBuffer::getAsBytes(void* dest
, unsigned byteLength
) const
358 if (!dest
|| byteLength
!= size())
361 const char* segment
= 0;
362 unsigned position
= 0;
363 while (unsigned segmentSize
= getSomeData(segment
, position
)) {
364 memcpy(static_cast<char*>(dest
) + position
, segment
, segmentSize
);
365 position
+= segmentSize
;
368 if (position
!= byteLength
) {
369 ASSERT_NOT_REACHED();
370 // Don't return the incomplete data.
377 PassRefPtr
<SkData
> SharedBuffer::getAsSkData() const
379 unsigned bufferLength
= size();
380 SkData
* data
= SkData::NewUninitialized(bufferLength
);
381 char* buffer
= static_cast<char*>(data
->writable_data());
382 const char* segment
= 0;
383 unsigned position
= 0;
384 while (unsigned segmentSize
= getSomeData(segment
, position
)) {
385 memcpy(buffer
+ position
, segment
, segmentSize
);
386 position
+= segmentSize
;
389 if (position
!= bufferLength
) {
390 ASSERT_NOT_REACHED();
391 // Don't return the incomplete SkData.
394 return adoptRef(data
);
397 bool SharedBuffer::lock()
399 return m_buffer
.lock();
402 void SharedBuffer::unlock()
404 mergeSegmentsIntoBuffer();
408 bool SharedBuffer::isLocked() const
410 return m_buffer
.isLocked();