/*
 * pscnv/bsd_support.h — FreeBSD compatibility shims for the pscnv driver.
 * (Imported from pscnv.git, blob 6b06fa21cd88755411b5f679616edc69db49c896.)
 */
// Note: Need to change /sys/conf/kmod.conf for FreeBSD
// set CSTD to gnu99 to enable support for nameless unions/structs

// Some headers used from /sys/ofed/include which all bear the following copyright:
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/limits.h>
38 #include <sys/kdb.h>
39 #include "drmP.h"
41 #define PSCNV_KAPI_GETPARAM_BUS_TYPE
43 typedef DRM_SPINTYPE spinlock_t;
45 #ifndef cpu_to_le16
46 #define cpu_to_le16(x) htole16(x)
47 #define le16_to_cpu(x) le16toh(x)
48 #endif
/* Linux spinlock API in terms of the DRM spinlock wrappers. */
#define spin_lock_init(lock)	DRM_SPININIT(lock, #lock)
#define spin_lock_destroy(lock)	DRM_SPINUNINIT(lock)
#define spin_lock(lock)		DRM_SPINLOCK(lock)
#define spin_unlock(lock)	DRM_SPINUNLOCK(lock)
#define spin_lock_irqsave(lock, flags) DRM_SPINLOCK_IRQSAVE(lock, flags)
#define spin_unlock_irqrestore(lock, flags) DRM_SPINUNLOCK_IRQRESTORE(lock, flags)
/*
 * Linux-style allocators on top of the DRM memory helpers.
 * NOTE(review): the gfp-flags argument of kzalloc()/kmalloc() is ignored;
 * every allocation comes from the DRM_MEM_DRIVER pool.
 */
#define kfree(x)	drm_free(x, 0, DRM_MEM_DRIVER)
#define kzalloc(x, y)	drm_calloc(x, 1, DRM_MEM_DRIVER)
#define kcalloc(x, y, z) drm_calloc(x, y, DRM_MEM_DRIVER)
#define kmalloc(x, y)	drm_alloc(x, DRM_MEM_DRIVER)
/* Empty stand-ins for Linux types that pscnv references but never uses. */
struct device_attribute {};
struct notifier_block {};
typedef struct pm_message_t { } pm_message_t;

/* Opaque framebuffer types; only pointers to these are passed around. */
struct fb_info;
struct fb_copyarea;
struct fb_fillrect;
struct fb_image;

struct vm_fault {};
struct vm_area_struct {};

#ifndef HZ
#define HZ hz	/* FreeBSD's tick-rate variable */
#endif

/* Always report AC power; FreeBSD has no power_supply framework. */
#define power_supply_is_system_supplied() 1
79 /* Dog slow version, but only called once
80 * Linux kernel has a faster software fallback
81 * but I prefer not to GPL this or even worry about it
83 static inline u32 hweight32(u32 val)
85 u32 i, ret = 0;
86 for (i = 1; i; i <<= 1)
87 if (val & i)
88 ++ret;
89 return ret;
/*
 * WARN(cond, fmt, ...): when cond is true, print file/line/function plus
 * the formatted message and dump a kernel backtrace.
 */
#define WARN(x, arg1, args...) do { \
	if (!(x)) break; \
	printf("%s:%d/%s " arg1, \
	    __FILE__, __LINE__, __FUNCTION__, ##args); \
	kdb_backtrace(); \
} while (0)
/* No trailing semicolon here: "if (x) BUG(); else ..." must stay valid. */
#define BUG() panic("BUG()\n")
#define WARN_ON(x, y, z...) do { if ((x)) WARN(y, ##z); } while (0)
#define BUG_ON(x) do { if ((x)) { panic(#x " triggered\n"); } } while (0)
#ifndef __must_check
#define __must_check
#endif

#ifndef _LINUX_DELAY_H_
#define _LINUX_DELAY_H_

/* Sleep for ms milliseconds (voluntary sleep, not a busy-wait). */
static inline void
linux_msleep(int ms)
{
	pause("lnxsleep", msecs_to_jiffies(ms));
}

#undef msleep
#define msleep linux_msleep
/* Busy-wait delays; FreeBSD's DELAY() takes microseconds. */
#define udelay(x) DELAY((x))
#define mdelay(x) DELAY((x) * 1000)

#endif /* _LINUX_DELAY_H_ */
122 #ifndef _LINUX_MUTEX_H_
123 #define _LINUX_MUTEX_H_
125 #include <sys/param.h>
126 #include <sys/lock.h>
127 #include <sys/sx.h>
129 typedef struct mutex {
130 struct sx sx;
131 } mutex_t;
133 #define mutex_lock(_m) sx_xlock(&(_m)->sx)
134 #define mutex_lock_nested(_m, _s) mutex_lock(_m)
135 #define mutex_lock_interruptible(_m) ({ mutex_lock((_m)); 0; })
136 #define mutex_unlock(_m) sx_xunlock(&(_m)->sx)
137 #define mutex_trylock(_m) !!sx_try_xlock(&(_m)->sx)
139 #define DEFINE_MUTEX(lock) \
140 mutex_t lock; \
141 SX_SYSINIT_FLAGS(lock, &(lock).sx, "lnxmtx", SX_NOWITNESS)
143 static inline void
144 linux_mutex_init(mutex_t *m)
147 memset(&m->sx, 0, sizeof(m->sx));
148 sx_init_flags(&m->sx, "lnxmtx", SX_NOWITNESS);
151 #define mutex_init linux_mutex_init
153 #endif /* _LINUX_MUTEX_H_ */
155 #ifndef _LINUX_TIMER_H_
156 #define _LINUX_TIMER_H_
158 #include <sys/param.h>
159 #include <sys/kernel.h>
160 #include <sys/callout.h>
162 struct timer_list {
163 struct callout timer_callout;
164 void (*function)(unsigned long);
165 unsigned long data;
168 #define expires timer_callout.c_time
170 static inline void
171 _timer_fn(void *context)
173 struct timer_list *timer;
175 timer = context;
176 timer->function(timer->data);
179 #define setup_timer(timer, func, dat) \
180 do { \
181 (timer)->function = (func); \
182 (timer)->data = (dat); \
183 callout_init(&(timer)->timer_callout, CALLOUT_MPSAFE); \
184 } while (0)
186 #define init_timer(timer) \
187 do { \
188 (timer)->function = NULL; \
189 (timer)->data = 0; \
190 callout_init(&(timer)->timer_callout, CALLOUT_MPSAFE); \
191 } while (0)
193 #define mod_timer(timer, expire) \
194 callout_reset(&(timer)->timer_callout, (expire) - jiffies, \
195 _timer_fn, (timer))
197 #define add_timer(timer) \
198 callout_reset(&(timer)->timer_callout, \
199 (timer)->timer_callout.c_time - jiffies, _timer_fn, (timer))
201 #define del_timer(timer) callout_stop(&(timer)->timer_callout)
202 #define del_timer_sync(timer) callout_drain(&(timer)->timer_callout)
204 #define timer_pending(timer) callout_pending(&(timer)->timer_callout)
206 static inline unsigned long
207 round_jiffies(unsigned long j)
209 return roundup(j, hz);
212 #endif /* _LINUX_TIMER_H_ */
214 #ifndef _LINUX_WORKQUEUE_H_
215 #define _LINUX_WORKQUEUE_H_
217 #include <sys/taskqueue.h>
219 struct workqueue_struct {
220 struct taskqueue *taskqueue;
223 struct work_struct {
224 struct task work_task;
225 struct taskqueue *taskqueue;
226 void (*fn)(struct work_struct *);
229 struct delayed_work {
230 struct work_struct work;
231 struct callout timer;
234 static inline struct delayed_work *
235 to_delayed_work(struct work_struct *work)
238 return container_of(work, struct delayed_work, work);
242 static inline void
243 _work_fn(void *context, int pending)
245 struct work_struct *work;
247 work = context;
248 work->fn(work);
#define INIT_WORK(work, func) \
do { \
	(work)->fn = (func); \
	(work)->taskqueue = NULL; \
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work)); \
} while (0)

#define INIT_DELAYED_WORK(_work, func) \
do { \
	INIT_WORK(&(_work)->work, func); \
	callout_init(&(_work)->timer, CALLOUT_MPSAFE); \
} while (0)

/* No deferrable timers on FreeBSD; treat them as regular delayed work. */
#define INIT_DELAYED_WORK_DEFERRABLE INIT_DELAYED_WORK

/* Run on the system-wide thread taskqueue, like Linux's schedule_work(). */
#define schedule_work(work) \
do { \
	(work)->taskqueue = taskqueue_thread; \
	taskqueue_enqueue(taskqueue_thread, &(work)->work_task); \
} while (0)

#define flush_scheduled_work() flush_taskqueue(taskqueue_thread)

/* Run on a specific workqueue created by create_workqueue(). */
#define queue_work(q, work) \
do { \
	(work)->taskqueue = (q)->taskqueue; \
	taskqueue_enqueue((q)->taskqueue, &(work)->work_task); \
} while (0)
280 static inline void
281 _delayed_work_fn(void *arg)
283 struct delayed_work *work;
285 work = arg;
286 taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
289 static inline int
290 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
291 unsigned long delay)
293 int pending;
295 pending = work->work.work_task.ta_pending;
296 work->work.taskqueue = wq->taskqueue;
297 if (delay != 0)
298 callout_reset(&work->timer, delay, _delayed_work_fn, work);
299 else
300 _delayed_work_fn((void *)work);
302 return (!pending);
305 static inline struct workqueue_struct *
306 _create_workqueue_common(char *name, int cpus)
308 struct workqueue_struct *wq;
310 wq = kmalloc(sizeof(*wq), M_WAITOK);
311 wq->taskqueue = taskqueue_create((name), M_WAITOK,
312 taskqueue_thread_enqueue, &wq->taskqueue);
313 taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, (name));
315 return (wq);
319 #define create_singlethread_workqueue(name) \
320 _create_workqueue_common(name, 1)
322 #define create_workqueue(name) \
323 _create_workqueue_common(name, MAXCPU)
325 static inline void
326 destroy_workqueue(struct workqueue_struct *wq)
328 taskqueue_free(wq->taskqueue);
329 kfree(wq);
332 #define flush_workqueue(wq) flush_taskqueue((wq)->taskqueue)
334 static inline void
335 _flush_fn(void *context, int pending)
339 static inline void
340 flush_taskqueue(struct taskqueue *tq)
342 struct task flushtask;
344 PHOLD(curproc);
345 TASK_INIT(&flushtask, 0, _flush_fn, NULL);
346 taskqueue_enqueue(tq, &flushtask);
347 taskqueue_drain(tq, &flushtask);
348 PRELE(curproc);
351 static inline int
352 cancel_work_sync(struct work_struct *work)
354 if (work->taskqueue &&
355 taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
356 taskqueue_drain(work->taskqueue, &work->work_task);
357 return 0;
361 * This may leave work running on another CPU as it does on Linux.
363 static inline int
364 cancel_delayed_work(struct delayed_work *work)
367 callout_stop(&work->timer);
368 if (work->work.taskqueue &&
369 taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
370 taskqueue_drain(work->work.taskqueue, &work->work.work_task);
371 return 0;
374 #endif /* _LINUX_WORKQUEUE_H_ */
376 #ifndef _LINUX_KREF_H_
377 #define _LINUX_KREF_H_
379 #include <sys/refcount.h>
381 struct kref {
382 volatile u_int count;
385 static inline void
386 kref_init(struct kref *kref)
389 refcount_init(&kref->count, 1);
392 static inline void
393 kref_get(struct kref *kref)
396 refcount_acquire(&kref->count);
399 static inline int
400 kref_put(struct kref *kref, void (*rel)(struct kref *kref))
403 if (refcount_release(&kref->count)) {
404 rel(kref);
405 return 1;
407 return 0;
410 #endif /* _KREF_H_ */
412 #ifndef _LINUX_LOG2_H_
413 #define _LINUX_LOG2_H_
415 #include <sys/libkern.h>
417 static inline unsigned long
418 roundup_pow_of_two(unsigned long x)
420 return (1UL << flsl(x - 1));
/* Nonzero iff n is a power of two.  A power of two has exactly one bit
 * set, so n & (n - 1) clears it.  Unlike the old detour through
 * roundup_pow_of_two(), this correctly rejects n == 0 instead of
 * invoking an undefined full-width shift. */
static inline int
is_power_of_2(unsigned long n)
{
	return (n != 0 && (n & (n - 1)) == 0);
}
429 static inline unsigned long
430 rounddown_pow_of_two(unsigned long x)
432 return (1UL << (flsl(x) - 1));
435 static inline unsigned long
436 ilog2(unsigned long x)
438 return (flsl(x) - 1);
441 #endif /* _LINUX_LOG2_H_ */
/* FreeBSD has no EREMOTEIO; ENXIO is the closest errno. */
#define EREMOTEIO ENXIO

#ifndef _LINUX_ERR_H_
#define _LINUX_ERR_H_

/* Linux ERR_PTR convention: the top MAX_ERRNO addresses encode -errno. */
#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

/* Encode a negative errno as a pointer. */
static inline void *
ERR_PTR(long error)
{
	return (void *)error;
}

/* Decode the negative errno from an ERR_PTR() pointer. */
static inline long
PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* Nonzero iff ptr is an encoded error value. */
static inline long
IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

/* Re-type an error pointer without altering its value. */
static inline void *
ERR_CAST(void *ptr)
{
	return (void *)ptr;
}

#endif /* _LINUX_ERR_H_ */
478 // Not defined inside _LINUX_WORKQUEUE_H_ header
479 static inline int
480 work_pending(struct work_struct *work)
482 return to_delayed_work(work)->work.work_task.ta_pending;