contrib/coverity/model.c

/*
 * Coverity Scan model
 * https://scan.coverity.com/models
 *
 * This is a modeling file for Coverity Scan.
 * Modeling helps to avoid false positives.
 *
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local pointer is not an error. It signifies that the
 *   variable could be either NULL or have some data.
 *
 * Coverity Scan doesn't pick up modifications automatically. The model file
 * must be uploaded by an admin in the analysis settings.
 *
 * Some of this was initially cribbed from:
 *
 * https://github.com/kees/coverity-linux/blob/trunk/model.c
 *
 * The model below is based on the original model by Brian Behlendorf for the
 * original zfsonlinux/zfs repository. Some inspiration was taken from
 * kees/coverity-linux, specifically involving memory copies.
 */
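
/*
 * Note (as we understand the Coverity modeling interface): the
 * __coverity_*__() calls used throughout this file are built-in primitives
 * recognized by the analysis engine, e.g. __coverity_alloc__() marks a value
 * as a fresh allocation, __coverity_writeall__() marks a buffer as fully
 * written, and __coverity_sleep__() marks a point that may block. They are
 * not real functions, and this file is only uploaded to Coverity Scan, never
 * compiled into ZFS.
 */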

#include <stdarg.h>

#define	KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */

#define	UMEM_DEFAULT	0x0000	/* normal -- may fail */
#define	UMEM_NOFAIL	0x0100	/* Never fails */

#define	NULL	(0)

typedef enum {
	B_FALSE = 0,
	B_TRUE = 1
} boolean_t;

typedef unsigned int uint_t;

int condition0, condition1;
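
/*
 * condition0 and condition1 are never assigned, so the analyzer treats their
 * values as unknown. Branching on them is (as we understand it) the usual
 * modeling trick to make Coverity explore both outcomes of an operation,
 * e.g. both the success and the failure path of an allocator that may fail.
 */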

int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
	(void) flags;
	__coverity_negative_sink__(len);
	__coverity_tainted_data_argument__(from);
	__coverity_tainted_data_argument__(to);
	__coverity_writeall__(to);
}
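
/*
 * Illustrative sketch (not part of the model, names are hypothetical):
 * because ddi_copyin() marks both buffers as tainted data, a caller that
 * uses the copied-in bytes as an index or length without validating them
 * may be flagged, e.g.:
 *
 *	struct hypothetical_req req;
 *	if (ddi_copyin(uaddr, &req, sizeof (req), 0) != 0)
 *		return (EFAULT);
 *	table[req.index] = 0;		// tainted index, likely reported
 */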

void *
memset(void *dst, int c, size_t len)
{
	__coverity_negative_sink__(len);
	if (c == 0)
		__coverity_writeall0__(dst);
	else
		__coverity_writeall__(dst);
	return (dst);
}

void *
memmove(void *dst, void *src, size_t len)
{
	int first = ((char *)src)[0];
	int last = ((char *)src)[len-1];

	__coverity_negative_sink__(len);
	__coverity_writeall__(dst);
	return (dst);
}

void *
memcpy(void *dst, void *src, size_t len)
{
	int first = ((char *)src)[0];
	int last = ((char *)src)[len-1];

	__coverity_negative_sink__(len);
	__coverity_writeall__(dst);
	return (dst);
}
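
/*
 * In the memmove()/memcpy() models above, the reads of src[0] and
 * src[len-1] are presumably there so that Coverity checks the source is
 * readable for the full length (catching out-of-bounds reads), while
 * __coverity_writeall__() records that the destination is completely
 * initialized afterwards.
 */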

void *
umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
	__coverity_negative_sink__(size);
	__coverity_negative_sink__(align);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}

void *
umem_alloc(size_t size, int kmflags)
{
	__coverity_negative_sink__(size);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}

void *
umem_zalloc(size_t size, int kmflags)
{
	__coverity_negative_sink__(size);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}

void
umem_free(void *buf, size_t size)
{
	__coverity_negative_sink__(size);
	__coverity_free__(buf);
}
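
/*
 * Illustrative sketch (not part of the model): with UMEM_DEFAULT the models
 * above may return NULL, so a caller that dereferences the result without a
 * check may be reported, while UMEM_NOFAIL callers should not be, e.g.:
 *
 *	int *p = umem_alloc(sizeof (int), UMEM_DEFAULT);
 *	*p = 0;				// possible NULL dereference
 *	umem_free(p, sizeof (int));
 */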

typedef struct {} umem_cache_t;

void *
umem_cache_alloc(umem_cache_t *skc, int flags)
{
	(void) skc;

	if (condition1)
		__coverity_sleep__();

	if (((UMEM_NOFAIL & flags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc_nosize__();
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_cache_free");
		return (buf);
	}

	return (NULL);
}

void
umem_cache_free(umem_cache_t *skc, void *obj)
{
	(void) skc;

	__coverity_free__(obj);
}

void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
		return (buf);
	}

	return (NULL);
}

void *
spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
		return (buf);
	}

	return (NULL);
}

void
spl_kmem_free(const void *ptr, size_t sz)
{
	__coverity_negative_sink__(sz);
	__coverity_free__(ptr);
}
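
/*
 * Illustrative sketch (not part of the model): a sleeping allocation
 * (KM_NOSLEEP clear) never returns NULL above, so only KM_NOSLEEP callers
 * should see missing-NULL-check reports, e.g.:
 *
 *	char *p = spl_kmem_alloc(16, KM_NOSLEEP, __func__, __LINE__);
 *	p[0] = '\0';			// possible NULL dereference
 *	spl_kmem_free(p, 16);
 */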

char *
kmem_vasprintf(const char *fmt, va_list ap)
{
	char *buf = __coverity_alloc_nosize__();
	(void) ap;

	__coverity_string_null_sink__(fmt);
	__coverity_string_size_sink__(fmt);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}

char *
kmem_asprintf(const char *fmt, ...)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(fmt);
	__coverity_string_size_sink__(fmt);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}

char *
kmem_strdup(const char *str)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(str);
	__coverity_string_size_sink__(str);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}

void
kmem_strfree(char *str)
{
	__coverity_free__(str);
}
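
/*
 * Illustrative sketch (not part of the model): marking the result as
 * "allocated, freed by kmem_strfree" means, as we understand the afm
 * primitive, that leaking the string or releasing it with a different
 * routine should be reported, e.g.:
 *
 *	char *name = kmem_asprintf("vdev-%d", id);	// id: hypothetical
 *	if (id < 0)
 *		return;			// leak: missing kmem_strfree(name)
 *	kmem_strfree(name);
 */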

void *
spl_vmem_alloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
		return (buf);
	}

	return (NULL);
}

void *
spl_vmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
		return (buf);
	}

	return (NULL);
}

void
spl_vmem_free(const void *ptr, size_t sz)
{
	__coverity_negative_sink__(sz);
	__coverity_free__(ptr);
}

typedef struct {} spl_kmem_cache_t;

void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	(void) skc;

	if (condition1)
		__coverity_sleep__();

	if ((flags == 0) || condition0) {
		void *buf = __coverity_alloc_nosize__();
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_cache_free");
		return (buf);
	}
}

void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	(void) skc;

	__coverity_free__(obj);
}

typedef struct {} zfsvfs_t;

int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
	(void) osname;
	(void) readonly;

	if (condition1)
		__coverity_sleep__();

	if (condition0) {
		*zfvp = __coverity_alloc_nosize__();
		__coverity_writeall__(*zfvp);
		return (0);
	}

	return (1);
}

void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
	__coverity_free__(zfsvfs);
}

typedef struct {} nvlist_t;

int
nvlist_alloc(nvlist_t **nvlp, uint_t nvflag, int kmflag)
{
	(void) nvflag;

	if (condition1)
		__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		*nvlp = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(*nvlp, "nvlist_free");
		__coverity_writeall__(*nvlp);
		return (0);
	}

	return (-1);
}

int
nvlist_dup(const nvlist_t *nvl, nvlist_t **nvlp, int kmflag)
{
	nvlist_t read = *nvl;

	if (condition1)
		__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		nvlist_t *nvl = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(nvl, "nvlist_free");
		__coverity_writeall__(nvl);
		*nvlp = nvl;
		return (0);
	}

	return (-1);
}

void
nvlist_free(nvlist_t *nvl)
{
	__coverity_free__(nvl);
}
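
/*
 * Illustrative sketch (not part of the model): with kmflag == 0 the nvlist
 * constructors above always succeed, otherwise they may fail with -1, and
 * the result is expected to be released with nvlist_free(), e.g.:
 *
 *	nvlist_t *nvl;
 *	if (nvlist_alloc(&nvl, 0, KM_NOSLEEP) != 0)	// may fail
 *		return (ENOMEM);
 *	...
 *	nvlist_free(nvl);
 */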

int
nvlist_pack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding,
    int kmflag)
{
	(void) nvl;
	(void) encoding;

	if (*bufp == NULL) {
		if (condition1)
			__coverity_sleep__();

		if ((kmflag == 0) || condition0) {
			char *buf = __coverity_alloc_nosize__();
			__coverity_writeall__(buf);
			/*
			 * We cannot use __coverity_mark_as_afm_allocated__()
			 * because the free function varies between the kernel
			 * and userspace.
			 */
			*bufp = buf;
			return (0);
		}

		return (-1);
	}

	/*
	 * Unfortunately, errors from the buffer being too small are not
	 * possible to model, so we assume success.
	 */
	__coverity_negative_sink__(*buflen);
	__coverity_writeall__(*bufp);
	return (0);
}

int
nvlist_unpack(char *buf, size_t buflen, nvlist_t **nvlp, int kmflag)
{
	__coverity_negative_sink__(buflen);

	if (condition1)
		__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		nvlist_t *nvl = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(nvl, "nvlist_free");
		__coverity_writeall__(nvl);
		*nvlp = nvl;
		int first = buf[0];
		int last = buf[buflen-1];
		return (0);
	}

	return (-1);
}

void *
malloc(size_t size)
{
	void *buf = __coverity_alloc__(size);

	if (condition1)
		__coverity_sleep__();

	__coverity_negative_sink__(size);
	__coverity_mark_as_uninitialized_buffer__(buf);
	__coverity_mark_as_afm_allocated__(buf, "free");

	return (buf);
}

void *
calloc(size_t nmemb, size_t size)
{
	void *buf = __coverity_alloc__(size * nmemb);

	if (condition1)
		__coverity_sleep__();

	__coverity_negative_sink__(size);
	__coverity_writeall0__(buf);
	__coverity_mark_as_afm_allocated__(buf, "free");
	return (buf);
}

void
free(void *buf)
{
	__coverity_free__(buf);
}
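
/*
 * Because the allocators in this file are tagged with their matching
 * release functions, mixing them up (e.g. passing a umem_alloc() buffer to
 * free(), or a malloc() buffer to umem_free()) should, as we understand the
 * afm annotations, be reported as an allocation/free mismatch.
 */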

int
sched_yield(void)
{
	__coverity_sleep__();
}

typedef struct {} kmutex_t;
typedef struct {} krwlock_t;
typedef int krw_t;

/*
 * Coverity reportedly does not support macros, so this only works for
 * userspace.
 */

void
mutex_enter(kmutex_t *mp)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(mp);
}

int
mutex_tryenter(kmutex_t *mp)
{
	if (condition0) {
		__coverity_exclusive_lock_acquire__(mp);
		return (1);
	}

	return (0);
}

void
mutex_exit(kmutex_t *mp)
{
	__coverity_exclusive_lock_release__(mp);
}

void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	(void) rw;

	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(rwlp);
}

void
rw_exit(krwlock_t *rwlp)
{
	__coverity_recursive_lock_release__(rwlp);
}

int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(rwlp);
		return (1);
	}

	return (0);
}
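
/*
 * Illustrative sketch (not part of the model): with the acquire/release
 * annotations above, Coverity tracks lock ownership across paths, so an
 * early return that skips the unlock should be reported, e.g.:
 *
 *	mutex_enter(&zp->z_lock);	// zp, error: hypothetical
 *	if (error != 0)
 *		return (error);		// missing mutex_exit(&zp->z_lock)
 *	mutex_exit(&zp->z_lock);
 */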

/* Thus, we fall back to the Linux kernel locks. */
struct {} mutex;
struct {} rw_semaphore;

void
mutex_lock(struct mutex *lock)
{
	if (condition0) {
		__coverity_sleep__();
	}
	__coverity_exclusive_lock_acquire__(lock);
}

void
mutex_unlock(struct mutex *lock)
{
	__coverity_exclusive_lock_release__(lock);
}

void
down_read(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_sleep__();
	}
	__coverity_recursive_lock_acquire__(sem);
}

void
down_write(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_sleep__();
	}
	__coverity_recursive_lock_acquire__(sem);
}

int
down_read_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}

int
down_write_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}

void
up_read(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}

void
up_write(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}

int
__cond_resched(void)
{
	if (condition0) {
		__coverity_sleep__();