 * https://scan.coverity.com/models
 *
 * This is a modeling file for Coverity Scan.
 * Modeling helps to avoid false positives.
 *
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local pointer is not an error. It signifies that the
 *   variable could be either NULL or have some data.
 *
 * Coverity Scan doesn't pick up modifications automatically. The model file
 * must be uploaded by an admin in the analysis settings.
 *
 * Some of this initially cribbed from:
 *
 * https://github.com/kees/coverity-linux/blob/trunk/model.c
 *
 * The below model was based on the original model by Brian Behlendorf for the
 * original zfsonlinux/zfs repository. Some inspiration was taken from
 * kees/coverity-linux, specifically involving memory copies.
 */
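/*
 * A quick illustration of the conventions above.  The two functions below
 * are hypothetical examples only (nothing in the analyzed code bears these
 * names); they sketch how little a model needs to say.  example_model_alloc()
 * models a sized allocation whose result must later be passed to free(), and
 * example_model_lookup() uses an uninitialized local pointer to say "this may
 * return NULL or valid data".
 */
void *
example_model_alloc(size_t size)
{
	/* the size must never be negative */
	__coverity_negative_sink__(size);

	/* the returned buffer is allocated but not initialized ... */
	void *buf = __coverity_alloc__(size);
	__coverity_mark_as_uninitialized_buffer__(buf);

	/* ... and the caller is expected to release it with free() */
	__coverity_mark_as_afm_allocated__(buf, "free");

	return (buf);
}

void *
example_model_lookup(void)
{
	/* uninitialized on purpose: the result may be NULL or real data */
	void *ret;

	return (ret);
}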
#define	KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */

#define	UMEM_DEFAULT	0x0000	/* normal -- may fail */
#define	UMEM_NOFAIL	0x0100	/* Never fails */
#define	NULL	(0)

typedef enum {
	B_FALSE = 0,
	B_TRUE = 1
} boolean_t;

typedef unsigned int uint_t;

int condition0, condition1;
void
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
	__coverity_negative_sink__(len);
	__coverity_tainted_data_argument__(from);
	__coverity_tainted_data_argument__(to);
	__coverity_writeall__(to);
}
void *
memset(void *dst, int c, size_t len)
{
	__coverity_negative_sink__(len);

	if (c == 0)
		__coverity_writeall0__(dst);
	else
		__coverity_writeall__(dst);

	return (dst);
}
void *
memmove(void *dst, void *src, size_t len)
{
	/* Reading the first and last byte checks that src is readable for len bytes. */
	int first = ((char *)src)[0];
	int last = ((char *)src)[len-1];

	__coverity_negative_sink__(len);
	__coverity_writeall__(dst);

	return (dst);
}
void *
memcpy(void *dst, void *src, size_t len)
{
	/* Reading the first and last byte checks that src is readable for len bytes. */
	int first = ((char *)src)[0];
	int last = ((char *)src)[len-1];

	__coverity_negative_sink__(len);
	__coverity_writeall__(dst);

	return (dst);
}
void *
umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
	__coverity_negative_sink__(size);
	__coverity_negative_sink__(align);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}
void *
umem_alloc(size_t size, int kmflags)
{
	__coverity_negative_sink__(size);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}
void *
umem_zalloc(size_t size, int kmflags)
{
	__coverity_negative_sink__(size);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}
void
umem_free(void *buf, size_t size)
{
	__coverity_negative_sink__(size);
	__coverity_free__(buf);
}
typedef struct {} umem_cache_t;
void *
umem_cache_alloc(umem_cache_t *skc, int flags)
{
	__coverity_sleep__();

	if (((UMEM_NOFAIL & flags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc_nosize__();
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_cache_free");
		return (buf);
	}

	return (NULL);
}
void
umem_cache_free(umem_cache_t *skc, void *obj)
{
	__coverity_free__(obj);
}
void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
	__coverity_negative_sink__(sz);

	if ((fl & KM_NOSLEEP) != KM_NOSLEEP)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
		return (buf);
	}

	return (NULL);
}
void *
spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	__coverity_negative_sink__(sz);

	if ((fl & KM_NOSLEEP) != KM_NOSLEEP)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
		return (buf);
	}

	return (NULL);
}
void
spl_kmem_free(const void *ptr, size_t sz)
{
	__coverity_negative_sink__(sz);
	__coverity_free__(ptr);
}
char *
kmem_vasprintf(const char *fmt, va_list ap)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(fmt);
	__coverity_string_size_sink__(fmt);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}
char *
kmem_asprintf(const char *fmt, ...)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(fmt);
	__coverity_string_size_sink__(fmt);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}
char *
kmem_strdup(const char *str)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(str);
	__coverity_string_size_sink__(str);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}
void
kmem_strfree(char *str)
{
	__coverity_free__(str);
}
void *
spl_vmem_alloc(size_t sz, int fl, const char *func, int line)
{
	__coverity_negative_sink__(sz);

	if ((fl & KM_NOSLEEP) != KM_NOSLEEP)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
		return (buf);
	}

	return (NULL);
}
void *
spl_vmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	__coverity_negative_sink__(sz);

	if ((fl & KM_NOSLEEP) != KM_NOSLEEP)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
		return (buf);
	}

	return (NULL);
}
void
spl_vmem_free(const void *ptr, size_t sz)
{
	__coverity_negative_sink__(sz);
	__coverity_free__(ptr);
}
typedef struct {} spl_kmem_cache_t;
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	__coverity_sleep__();

	if ((flags == 0) || condition0) {
		void *buf = __coverity_alloc_nosize__();
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_cache_free");
		return (buf);
	}

	return (NULL);
}
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	__coverity_free__(obj);
}
typedef struct {} zfsvfs_t;
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
	__coverity_sleep__();

	*zfvp = __coverity_alloc_nosize__();
	__coverity_writeall__(*zfvp);

	return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
	__coverity_free__(zfsvfs);
}
typedef struct {} nvlist_t;
int
nvlist_alloc(nvlist_t **nvlp, uint_t nvflag, int kmflag)
{
	__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		*nvlp = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(*nvlp, "nvlist_free");
		__coverity_writeall__(*nvlp);
		return (0);
	}

	return (-1);
}
int
nvlist_dup(const nvlist_t *nvl, nvlist_t **nvlp, int kmflag)
{
	/* Force a read of *nvl so Coverity knows it must be valid. */
	nvlist_t read = *nvl;

	__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		nvlist_t *nvl = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(nvl, "nvlist_free");
		__coverity_writeall__(nvl);
		*nvlp = nvl;
		return (0);
	}

	return (-1);
}
void
nvlist_free(nvlist_t *nvl)
{
	__coverity_free__(nvl);
}
int
nvlist_pack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding,
    int kmflag)
{
	__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		char *buf = __coverity_alloc_nosize__();
		__coverity_writeall__(buf);
		/*
		 * We cannot use __coverity_mark_as_afm_allocated__()
		 * because the free function varies between the kernel
		 * and userland.
		 */
		*bufp = buf;
	}

	/*
	 * Unfortunately, errors from the buffer being too small are not
	 * possible to model, so we assume success.
	 */
	__coverity_negative_sink__(*buflen);
	__coverity_writeall__(*bufp);

	return (0);
}
int
nvlist_unpack(char *buf, size_t buflen, nvlist_t **nvlp, int kmflag)
{
	__coverity_negative_sink__(buflen);

	__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		nvlist_t *nvl = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(nvl, "nvlist_free");
		__coverity_writeall__(nvl);
		*nvlp = nvl;

		/* Reading the last byte checks that buf is buflen bytes long. */
		int last = buf[buflen-1];

		return (0);
	}

	return (-1);
}
void *
malloc(size_t size)
{
	void *buf = __coverity_alloc__(size);

	__coverity_sleep__();

	__coverity_negative_sink__(size);
	__coverity_mark_as_uninitialized_buffer__(buf);
	__coverity_mark_as_afm_allocated__(buf, "free");

	return (buf);
}
void *
calloc(size_t nmemb, size_t size)
{
	void *buf = __coverity_alloc__(size * nmemb);

	__coverity_sleep__();

	__coverity_negative_sink__(size);
	__coverity_writeall0__(buf);
	__coverity_mark_as_afm_allocated__(buf, "free");

	return (buf);
}
void
free(void *buf)
{
	__coverity_free__(buf);
}
int
sched_yield(void)
{
	__coverity_sleep__();

	return (0);
}
typedef struct {} kmutex_t;
typedef struct {} krwlock_t;
typedef int krw_t;
/*
 * Coverity reportedly does not support macros, so this only works for
 * userspace.
 */
void
mutex_enter(kmutex_t *mp)
{
	__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(mp);
}
int
mutex_tryenter(kmutex_t *mp)
{
	if (condition0) {
		__coverity_exclusive_lock_acquire__(mp);
		return (1);
	}

	return (0);
}
void
mutex_exit(kmutex_t *mp)
{
	__coverity_exclusive_lock_release__(mp);
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	__coverity_sleep__();

	__coverity_recursive_lock_acquire__(rwlp);
}
void
rw_exit(krwlock_t *rwlp)
{
	__coverity_recursive_lock_release__(rwlp);
}
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(rwlp);
		return (1);
	}

	return (0);
}
/* Thus, we fall back to the Linux kernel locks */
struct rw_semaphore {};
void
mutex_lock(struct mutex *lock)
{
	__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(lock);
}
void
mutex_unlock(struct mutex *lock)
{
	__coverity_exclusive_lock_release__(lock);
}
void
down_read(struct rw_semaphore *sem)
{
	__coverity_sleep__();

	__coverity_recursive_lock_acquire__(sem);
}
void
down_write(struct rw_semaphore *sem)
{
	__coverity_sleep__();

	__coverity_recursive_lock_acquire__(sem);
}
int
down_read_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}
int
down_write_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}
void
up_read(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}
void
up_write(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}
int
__cond_resched(void)
{
	__coverity_sleep__();

	return (0);
}