1 // Note: Need to change /sys/conf/kmod.conf for FreeBSD
2 // set CSTD to gnu99 to enable support for nameless unions/structs
4 // Some headers used from /sys/ofed/include which all bear the following copyright:
6 * Copyright (c) 2010 Isilon Systems, Inc.
7 * Copyright (c) 2010 iX Systems, Inc.
8 * Copyright (c) 2010 Panasas, Inc.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice unmodified, this list of conditions, and the following
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/param.h>
34 #include <sys/types.h>
36 #include <sys/mutex.h>
37 #include <sys/limits.h>
// Marker: this pscnv port exposes the GETPARAM bus-type parameter.
41 #define PSCNV_KAPI_GETPARAM_BUS_TYPE
// Linux spinlock type mapped onto the DRM portability spin type.
43 typedef DRM_SPINTYPE spinlock_t
;
// Byte-order helpers: little-endian values <-> host order.
46 #define cpu_to_le16(x) htole16(x)
47 #define le16_to_cpu(x) le16toh(x)
// Linux spinlock API expressed with the DRM spinlock primitives.
50 #define spin_lock_init(lock) DRM_SPININIT(lock, #lock)
51 #define spin_lock_destroy(lock) DRM_SPINUNINIT(lock)
52 #define spin_lock(lock) DRM_SPINLOCK(lock)
53 #define spin_unlock(lock) DRM_SPINUNLOCK(lock)
54 #define spin_lock_irqsave(lock, flags) DRM_SPINLOCK_IRQSAVE(lock, flags)
55 #define spin_unlock_irqrestore(lock, flags) DRM_SPINUNLOCK_IRQRESTORE(lock, flags)
// Linux allocators on top of the DRM memory API.  Note that the Linux
// gfp-flags argument (y/z) is dropped, and kmalloc() does NOT zero.
57 #define kfree(x) drm_free(x, 0, DRM_MEM_DRIVER)
58 #define kzalloc(x, y) drm_calloc(x, 1, DRM_MEM_DRIVER)
59 #define kcalloc(x, y, z) drm_calloc(x, y, DRM_MEM_DRIVER)
60 #define kmalloc(x, y) drm_alloc(x, DRM_MEM_DRIVER)
// Empty stand-ins for Linux types that this code only passes around.
62 struct device_attribute
{};
63 struct notifier_block
{};
64 typedef struct pm_message_t
{ } pm_message_t
;
71 struct vm_area_struct
{};
// Stubbed to 1 -- presumably "AC power present"; verify against callers.
77 #define power_supply_is_system_supplied() 1
79 /* Dog slow version, but only called once
80 * Linux kernel has a faster software fallback
81 * but I prefer not to GPL this or even worry about it
83 static inline u32
hweight32(u32 val
)
86 for (i
= 1; i
; i
<<= 1)
// Diagnostic macros: WARN prints file/line/function plus a formatted
// message; BUG/BUG_ON panic the kernel outright.
92 #define WARN(x, arg1, args...) do { \
94 printf("%s:%d/%s " arg1, \
95 __FILE__, __LINE__, __FUNCTION__, ##args); \
98 #define BUG() panic("BUG()\n");
// NOTE(review): WARN_ON takes extra format arguments (y/z), unlike
// Linux's one-argument WARN_ON(cond) -- callers must pass a message.
99 #define WARN_ON(x,y,z...) do { if ((x)) WARN(y, ##z); } while (0)
100 #define BUG_ON(x) do { if ((x)) { panic(#x "triggered\n"); } } while (0)
106 #ifndef _LINUX_DELAY_H_
107 #define _LINUX_DELAY_H_
// Body of the msleep() replacement: sleep on the "lnxsleep" wait channel
// for ms milliseconds converted to ticks.
112 pause("lnxsleep", msecs_to_jiffies(ms
));
// msleep sleeps; udelay/mdelay busy-wait via DELAY() (microseconds).
116 #define msleep linux_msleep
117 #define udelay(x) DELAY((x))
118 #define mdelay(x) DELAY((x) * 1000)
120 #endif /* _LINUX_DELAY_H_ */
122 #ifndef _LINUX_MUTEX_H_
123 #define _LINUX_MUTEX_H_
125 #include <sys/param.h>
126 #include <sys/lock.h>
// Linux mutex carried by an sx(9) lock; the struct's 'sx' member is
// implied by the accessors below.
129 typedef struct mutex
{
// Lock/unlock map to exclusive sx operations.  *_nested degrades to a
// plain lock and *_interruptible always reports success (0).
133 #define mutex_lock(_m) sx_xlock(&(_m)->sx)
134 #define mutex_lock_nested(_m, _s) mutex_lock(_m)
135 #define mutex_lock_interruptible(_m) ({ mutex_lock((_m)); 0; })
136 #define mutex_unlock(_m) sx_xunlock(&(_m)->sx)
137 #define mutex_trylock(_m) !!sx_try_xlock(&(_m)->sx)
// Static definition plus SYSINIT registration, witness disabled
// (SX_NOWITNESS) as for mutex_init below.
139 #define DEFINE_MUTEX(lock) \
141 SX_SYSINIT_FLAGS(lock, &(lock).sx, "lnxmtx", SX_NOWITNESS)
144 linux_mutex_init(mutex_t
*m
)
147 memset(&m
->sx
, 0, sizeof(m
->sx
));
148 sx_init_flags(&m
->sx
, "lnxmtx", SX_NOWITNESS
);
151 #define mutex_init linux_mutex_init
153 #endif /* _LINUX_MUTEX_H_ */
155 #ifndef _LINUX_TIMER_H_
156 #define _LINUX_TIMER_H_
158 #include <sys/param.h>
159 #include <sys/kernel.h>
160 #include <sys/callout.h>
// timer_list members: the backing callout plus the Linux-style handler
// taking an opaque unsigned long (the 'data' cookie set by setup_timer).
163 struct callout timer_callout
;
164 void (*function
)(unsigned long);
// Expose Linux's timer->expires as the callout's scheduled tick count.
168 #define expires timer_callout.c_time
171 _timer_fn(void *context
)
173 struct timer_list
*timer
;
176 timer
->function(timer
->data
);
// setup_timer/init_timer prime the handler, cookie and backing callout;
// mod_timer/add_timer convert an absolute expiry (in jiffies) into the
// relative tick count that callout_reset() expects.
179 #define setup_timer(timer, func, dat) \
181 (timer)->function = (func); \
182 (timer)->data = (dat); \
183 callout_init(&(timer)->timer_callout, CALLOUT_MPSAFE); \
186 #define init_timer(timer) \
188 (timer)->function = NULL; \
190 callout_init(&(timer)->timer_callout, CALLOUT_MPSAFE); \
193 #define mod_timer(timer, expire) \
194 callout_reset(&(timer)->timer_callout, (expire) - jiffies, \
197 #define add_timer(timer) \
198 callout_reset(&(timer)->timer_callout, \
199 (timer)->timer_callout.c_time - jiffies, _timer_fn, (timer))
// del_timer stops a pending callout; del_timer_sync additionally waits
// for a running handler to finish (callout_drain).
201 #define del_timer(timer) callout_stop(&(timer)->timer_callout)
202 #define del_timer_sync(timer) callout_drain(&(timer)->timer_callout)
204 #define timer_pending(timer) callout_pending(&(timer)->timer_callout)
206 static inline unsigned long
207 round_jiffies(unsigned long j
)
209 return roundup(j
, hz
);
212 #endif /* _LINUX_TIMER_H_ */
214 #ifndef _LINUX_WORKQUEUE_H_
215 #define _LINUX_WORKQUEUE_H_
217 #include <sys/taskqueue.h>
// A Linux workqueue is just a handle on a FreeBSD taskqueue.
219 struct workqueue_struct
{
220 struct taskqueue
*taskqueue
;
// work_struct members: the taskqueue task, the queue it was last placed
// on (NULL until queued), and the Linux-style work callback.
224 struct task work_task
;
225 struct taskqueue
*taskqueue
;
226 void (*fn
)(struct work_struct
*);
// delayed_work = a work item plus the callout that defers its enqueue.
// NOTE: 'work' must remain the FIRST member -- to_delayed_work()'s
// container_of() relies on it sitting at offset zero.
229 struct delayed_work
{
230 struct work_struct work
;
231 struct callout timer
;
234 static inline struct delayed_work
*
235 to_delayed_work(struct work_struct
*work
)
238 return container_of(work
, struct delayed_work
, work
);
243 _work_fn(void *context
, int pending
)
245 struct work_struct
*work
;
// INIT_WORK wires a work item to the _work_fn trampoline; the queue is
// chosen later (schedule_work -> shared taskqueue_thread, queue_work ->
// the caller's workqueue).
251 #define INIT_WORK(work, func) \
253 (work)->fn = (func); \
254 (work)->taskqueue = NULL; \
255 TASK_INIT(&(work)->work_task, 0, _work_fn, (work)); \
258 #define INIT_DELAYED_WORK(_work, func) \
260 INIT_WORK(&(_work)->work, func); \
261 callout_init(&(_work)->timer, CALLOUT_MPSAFE); \
264 #define INIT_DELAYED_WORK_DEFERRABLE INIT_DELAYED_WORK
// schedule_work: run on the shared kernel taskqueue_thread queue.
266 #define schedule_work(work) \
268 (work)->taskqueue = taskqueue_thread; \
269 taskqueue_enqueue(taskqueue_thread, &(work)->work_task); \
272 #define flush_scheduled_work() flush_taskqueue(taskqueue_thread)
// queue_work: run on a specific workqueue from create_workqueue().
274 #define queue_work(q, work) \
276 (work)->taskqueue = (q)->taskqueue; \
277 taskqueue_enqueue((q)->taskqueue, &(work)->work_task); \
281 _delayed_work_fn(void *arg
)
283 struct delayed_work
*work
;
286 taskqueue_enqueue(work
->work
.taskqueue
, &work
->work
.work_task
);
290 queue_delayed_work(struct workqueue_struct
*wq
, struct delayed_work
*work
,
295 pending
= work
->work
.work_task
.ta_pending
;
296 work
->work
.taskqueue
= wq
->taskqueue
;
298 callout_reset(&work
->timer
, delay
, _delayed_work_fn
, work
);
300 _delayed_work_fn((void *)work
);
305 static inline struct workqueue_struct
*
306 _create_workqueue_common(char *name
, int cpus
)
308 struct workqueue_struct
*wq
;
310 wq
= kmalloc(sizeof(*wq
), M_WAITOK
);
311 wq
->taskqueue
= taskqueue_create((name
), M_WAITOK
,
312 taskqueue_thread_enqueue
, &wq
->taskqueue
);
313 taskqueue_start_threads(&wq
->taskqueue
, cpus
, PWAIT
, (name
));
319 #define create_singlethread_workqueue(name) \
320 _create_workqueue_common(name, 1)
322 #define create_workqueue(name) \
323 _create_workqueue_common(name, MAXCPU)
326 destroy_workqueue(struct workqueue_struct
*wq
)
328 taskqueue_free(wq
->taskqueue
);
332 #define flush_workqueue(wq) flush_taskqueue((wq)->taskqueue)
/*
 * Deliberately empty task handler, used by flush_taskqueue() purely as
 * a drain marker.
 */
static void
_flush_fn(void *context, int pending)
{
}
340 flush_taskqueue(struct taskqueue
*tq
)
342 struct task flushtask
;
345 TASK_INIT(&flushtask
, 0, _flush_fn
, NULL
);
346 taskqueue_enqueue(tq
, &flushtask
);
347 taskqueue_drain(tq
, &flushtask
);
352 cancel_work_sync(struct work_struct
*work
)
354 if (work
->taskqueue
&&
355 taskqueue_cancel(work
->taskqueue
, &work
->work_task
, NULL
))
356 taskqueue_drain(work
->taskqueue
, &work
->work_task
);
361 * This may leave work running on another CPU as it does on Linux.
364 cancel_delayed_work(struct delayed_work
*work
)
367 callout_stop(&work
->timer
);
368 if (work
->work
.taskqueue
&&
369 taskqueue_cancel(work
->work
.taskqueue
, &work
->work
.work_task
, NULL
))
370 taskqueue_drain(work
->work
.taskqueue
, &work
->work
.work_task
);
374 #endif /* _LINUX_WORKQUEUE_H_ */
376 #ifndef _LINUX_KREF_H_
377 #define _LINUX_KREF_H_
379 #include <sys/refcount.h>
// struct kref payload: the refcount(9) counter manipulated below.
382 volatile u_int count
;
386 kref_init(struct kref
*kref
)
389 refcount_init(&kref
->count
, 1);
393 kref_get(struct kref
*kref
)
396 refcount_acquire(&kref
->count
);
400 kref_put(struct kref
*kref
, void (*rel
)(struct kref
*kref
))
403 if (refcount_release(&kref
->count
)) {
410 #endif /* _KREF_H_ */
412 #ifndef _LINUX_LOG2_H_
413 #define _LINUX_LOG2_H_
415 #include <sys/libkern.h>
/*
 * Smallest power of two >= v.  Undefined for v == 0 (flsl(v - 1) then
 * yields the full word width and the shift overflows) and for values
 * above 1UL << (LONG_BIT - 1).
 */
static inline unsigned long
roundup_pow_of_two(unsigned long v)
{
        return (1UL << flsl(v - 1));
}
/*
 * True iff n is a power of two.  Uses the classic n & (n - 1) test
 * instead of comparing against roundup_pow_of_two(n): that comparison
 * invoked undefined behavior for n == 0 (a shift by the full word
 * width inside roundup_pow_of_two); here n == 0 is simply false.
 */
static inline int
is_power_of_2(unsigned long n)
{
        return (n != 0 && (n & (n - 1)) == 0);
}
/*
 * Largest power of two <= v.  v must be non-zero: flsl(0) - 1 would
 * produce a negative shift count.
 */
static inline unsigned long
rounddown_pow_of_two(unsigned long v)
{
        return (1UL << (flsl(v) - 1));
}
435 static inline unsigned long
436 ilog2(unsigned long x
)
438 return (flsl(x
) - 1);
441 #endif /* _LINUX_LOG2_H_ */
// FreeBSD has no EREMOTEIO; map it to "device not configured".
443 #define EREMOTEIO ENXIO
445 #ifndef _LINUX_ERR_H_
446 #define _LINUX_ERR_H_
// Linux ERR_PTR convention: the top MAX_ERRNO addresses encode a
// negative errno value rather than a valid pointer.
448 #define MAX_ERRNO 4095
450 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
// Body of ERR_PTR(): stuff a (negative) errno into a pointer value.
455 return (void *)error
;
// PTR_ERR(): recover the errno value from an ERR_PTR-encoded pointer.
459 PTR_ERR(const void *ptr
)
465 IS_ERR(const void *ptr
)
467 return IS_ERR_VALUE((unsigned long)ptr
);
476 #endif /* _LINUX_ERR_H_ */
478 // Not defined inside _LINUX_WORKQUEUE_H_ header
480 work_pending(struct work_struct
*work
)
482 return to_delayed_work(work
)->work
.work_task
.ta_pending
;