/*
    Copyright 2009, The AROS Development Team. All rights reserved.
*/

#ifndef _DRM_COMPAT_FUNCS_
#define _DRM_COMPAT_FUNCS_

#include <proto/exec.h>
#include <aros/debug.h>

#include "drm_compat_types.h"

#define writeq(val, addr) (*(volatile UQUAD*)(addr) = (val))
#define readq(addr) (*(volatile UQUAD*)(addr))
#define writel(val, addr) (*(volatile ULONG*)(addr) = (val))
#define readl(addr) (*(volatile ULONG*)(addr))
#define writew(val, addr) (*(volatile UWORD*)(addr) = (val))
#define readw(addr) (*(volatile UWORD*)(addr))
#define writeb(val, addr) (*(volatile UBYTE*)(addr) = (val))
#define readb(addr) (*(volatile UBYTE*)(addr))
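
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * read-modify-write of a memory-mapped register using the accessors above.
 * The register offset and bit mask are made up for the example.
 */
#if 0
static inline void example_set_reg_bit(void *mmio_base)
{
    ULONG val = readl(mmio_base + 0x100);   /* read the current register value */
    val |= 0x00000001;                      /* set the enable bit              */
    writel(val, mmio_base + 0x100);         /* write it back                   */
}
#endif
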
#define kzalloc(size, flags) HIDDNouveauAlloc(size)
#define kcalloc(count, size, flags) HIDDNouveauAlloc((count) * (size))
#define kmalloc(size, flags) HIDDNouveauAlloc(size)
#define vmalloc_user(size) HIDDNouveauAlloc(size)
#define vmalloc(size) HIDDNouveauAlloc(size)
#define kfree(objp) HIDDNouveauFree(objp)
#define vfree(objp) HIDDNouveauFree(objp)
#define capable(p) TRUE
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define round_up(x, y) roundup(x, y)
#define lower_32_bits(n) ((u32)(n))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define mutex_lock(x) ObtainSemaphore(x.semaphore)
#define mutex_lock_nested(x, y) mutex_lock(x)
#define mutex_unlock(x) ReleaseSemaphore(x.semaphore)
#define mutex_trylock(x) AttemptSemaphore(x.semaphore)
#define mutex_init(x) InitSemaphore(x.semaphore);
#define likely(x) __builtin_expect((IPTR)(x),1)
#define unlikely(x) __builtin_expect((IPTR)(x),0)
#define mb() __asm __volatile("lock; addl $0,0(%%esp)" : : : "memory");
#define wmb() __asm __volatile("" : : : "memory");
#define ffs(x) __builtin_ffs(x)
#define fls_long(x) ((sizeof(x) * 8) - __builtin_clzl(x))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#define clamp(a, vmin, vmax) ((min(vmax, max(vmin, a))))
#define ilog2(n) (fls_long(n) - 1)
#define rounddown_pow_of_two(n) (1UL << ilog2(n))
#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
#define access_ok(a, b, c) TRUE
#define le16_to_cpu(x) AROS_LE2WORD(x)
#define le32_to_cpu(x) AROS_LE2LONG(x)
#define cpu_to_le16(x) AROS_WORD2LE(x)
#define mdelay(x) udelay(1000 * (x))
#define msleep(x) udelay(1000 * (x))
#define KHZ2PICOS(x) (1000000000UL/(x))
#define uninitialized_var(x) x = 0
#define get_user(x, p) ({u32 ret = 0; x = *(p); ret;})
#define put_user(x, p) ({u32 ret = 0; *(p) = x; ret;})
#define rounddown(x, y) (((x)/(y))*(y))
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

APTR HIDDNouveauAlloc(ULONG size);
VOID HIDDNouveauFree(APTR memory);

void iowrite32(u32 val, void * addr);
unsigned int ioread32(void * addr);
void iowrite16(u16 val, void * addr);
unsigned int ioread16(void * addr);
void iowrite8(u8 val, void * addr);
unsigned int ioread8(void * addr);

void udelay(unsigned long usecs);
int abs(int j); /* Code in librom.a */

static inline ULONG copy_from_user(APTR to, APTR from, IPTR size)
{
    memcpy(to, from, size);
    return 0;
}

static inline ULONG copy_to_user(APTR to, APTR from, IPTR size)
{
    memcpy(to, from, size);
    return 0;
}

static inline VOID memcpy_toio(APTR dst, CONST_APTR src, ULONG size)
{
    /* TODO: optimize by using writel */
    UBYTE * srcp = (UBYTE *)src;
    ULONG i;

    for (i = 0; i < size; i++)
        writeb(*(srcp + i), dst + i);
}

static inline VOID memcpy_fromio(APTR dst, CONST_APTR src, ULONG size)
{
    /* TODO: optimize by using readl */
    UBYTE * dstp = (UBYTE *)dst;
    ULONG i;

    for (i = 0; i < size; i++)
        *(dstp + i) = readb(src + i);
}

#define BUG_ON(condition) do { if (unlikely(condition)) bug("BUG: %s:%d\n", __FILE__, __LINE__); } while(0)
#define WARN_ON(condition) do { if (unlikely(condition)) bug("WARN: %s:%d\n", __FILE__, __LINE__); } while(0)
#define EXPORT_SYMBOL(x)
#define PTR_ERR(addr) ((SIPTR)(addr))
#define ERR_PTR(error) ((APTR)(SIPTR)(error))
static inline IPTR IS_ERR(APTR ptr)
{
    return (IPTR)(ptr) >= (IPTR)-MAX_ERRNO;
}
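
/*
 * Usage sketch (illustrative, not part of the original header): the Linux
 * error-pointer pattern supported by ERR_PTR()/IS_ERR()/PTR_ERR() above.
 * example_create(), its callers and the errno constants (assumed to come
 * from drm_compat_types.h) are only for the example.
 */
#if 0
static APTR example_create(ULONG size)
{
    APTR obj;

    if (size == 0)
        return ERR_PTR(-EINVAL);    /* encode an errno value in the pointer */

    obj = HIDDNouveauAlloc(size);
    if (obj == NULL)
        return ERR_PTR(-ENOMEM);

    return obj;
}

static LONG example_caller(void)
{
    APTR obj = example_create(128);

    if (IS_ERR(obj))
        return PTR_ERR(obj);        /* recover the errno value */

    /* ... use obj ... */
    HIDDNouveauFree(obj);
    return 0;
}
#endif
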
#define printk(fmt, ...) bug(fmt, ##__VA_ARGS__)
#define IMPLEMENT(fmt, ...) bug("------IMPLEMENT(%s): " fmt, __func__ , ##__VA_ARGS__)
#define TRACE(fmt, ...) D(bug("[TRACE](%s): " fmt, __func__ , ##__VA_ARGS__))
#define BUG(x) bug("BUG:(%s)\n", __func__)
#define WARN(condition, message, ...) do { if (unlikely(condition)) bug("WARN: %s:%d" message "\n", __FILE__, __LINE__, ##__VA_ARGS__); } while(0)
#define dev_warn(dev, fmt, ...) bug(fmt, ##__VA_ARGS__)

void * ioremap(resource_size_t offset, unsigned long size);
#define pci_map_page(a, b, c, d, e) (dma_addr_t)(b->address + c)
#define pci_dma_mapping_error(a, b) FALSE
#define pci_unmap_page(a, b, c, d)
#define ioremap_nocache ioremap
#define ioremap_wc ioremap
void iounmap(void * addr);
resource_size_t pci_resource_start(struct pci_dev * pdev, unsigned int barnum);
unsigned long pci_resource_len(struct pci_dev * pdev, unsigned int barnum);
#define PCI_DEVFN(dev, fun) dev, fun
void * pci_get_bus_and_slot(unsigned int bus, unsigned int dev, unsigned int fun);
int pci_read_config_word(struct pci_dev * pdev, int where, u16 *val);
int pci_read_config_dword(struct pci_dev * pdev, int where, u32 *val);
int pci_write_config_dword(struct pci_dev * pdev, int where, u32 val);
#define pci_name(pdev) ((const char *)pdev->name)
int pci_is_pcie(struct pci_dev * pdev);
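
/*
 * Usage sketch (illustrative, not part of the original header): mapping a
 * PCI BAR with the wrappers above. The choice of BAR0 and the probe-style
 * function are assumptions made only for the example.
 */
#if 0
static int example_probe_bar0(struct pci_dev *pdev)
{
    resource_size_t start = pci_resource_start(pdev, 0);
    unsigned long len = pci_resource_len(pdev, 0);
    void *regs = ioremap(start, len);       /* map the BAR0 registers */

    if (regs == NULL)
        return -1;

    (void)readl(regs);                      /* e.g. read the register at offset 0 */

    iounmap(regs);                          /* release the mapping */
    return 0;
}
#endif
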
void clear_bit(int nr, volatile void * addr);
void set_bit(int nr, volatile void *addr);
int test_bit(int nr, volatile void *addr);
#define __set_bit(nr, addr) set_bit(nr, addr)
#define __clear_bit(nr, addr) clear_bit(nr, addr)

void __free_page(struct page * p);
struct page * create_page_helper(); /* Helper function - not from compat */
#define PageHighMem(p) FALSE
#define put_page(p) __free_page(p) /* FIXME: This might be wrong */
#define page_to_phys(p) (dma_addr_t)p->address
#define kmap(p) p->address
#define kmap_atomic(p, type) p->address
#define vmap(p, count, flags, prot) (p)[0]->address
#define kunmap_atomic(addr, type)

#define set_page_dirty(p)

/* Atomic handling */
static inline int atomic_add_return(int i, atomic_t *v)
{
    return __sync_add_and_fetch(&v->count, i);
}

static inline void atomic_add(int i, atomic_t *v)
{
    (void)__sync_add_and_fetch(&v->count, i);
}

static inline void atomic_inc(atomic_t *v)
{
    (void)__sync_add_and_fetch(&v->count, 1);
}

static inline void atomic_set(atomic_t *v, int i)
{
    v->count = i;
}

static inline int atomic_read(atomic_t *v)
{
    return v->count;
}

static inline void atomic_sub(int i, atomic_t *v)
{
    (void)__sync_sub_and_fetch(&v->count, i);
}

static inline void atomic_dec(atomic_t *v)
{
    (void)__sync_sub_and_fetch(&v->count, 1);
}

static inline int atomic_dec_and_test(atomic_t *v)
{
    return (__sync_sub_and_fetch(&v->count, 1) == 0);
}

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
    return (__sync_sub_and_fetch(&v->count, i) == 0);
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
    return __sync_val_compare_and_swap(&v->count, old, new);
}

static inline int atomic_inc_not_zero(atomic_t *v)
{
    int val = atomic_read(v);

    if (val != 0)
        atomic_inc(v);

    return val != 0;
}

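/*
 * Usage sketch (illustrative, not part of the original header): a lock-free
 * "increment unless a limit is reached" helper built on atomic_read() and
 * atomic_cmpxchg() above. The limit parameter is made up for the example.
 */
#if 0
static inline int example_inc_below_limit(atomic_t *v, int limit)
{
    int old, new;

    do
    {
        old = atomic_read(v);
        if (old >= limit)
            return 0;               /* limit reached, counter left untouched */
        new = old + 1;
    } while (atomic_cmpxchg(v, old, new) != old);

    return 1;                       /* counter was incremented */
}
#endif
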
/* Code protected by a spin lock is guaranteed to be atomic. This means that
 * preemption on this CPU needs to be disabled while it executes.
 * Additionally, if the _irq variant of the spin lock functions is used,
 * it is also guaranteed that interrupts are disabled on the executing CPU.
 * The _bh variant disables the "bottom half" processing, which is currently not
 * implemented in the compat wrappers.
 */

static inline void spin_lock_init(spinlock_t * lock)
{
    /* Nothing to set up for these wrappers */
}

static inline void spin_lock(spinlock_t * lock)
{
    Forbid();
}

static inline void spin_unlock(spinlock_t * lock)
{
    Permit();
}

#define spin_lock_bh(x) spin_lock(x)
#define spin_unlock_bh(x) spin_unlock(x)

#define spin_lock_irqsave(lock, flags)          \
    do                                          \
    {                                           \
        (void)(lock); (void)(flags);            \
        Disable();                              \
    } while(0)

#define spin_unlock_irqrestore(lock, flags)     \
    do                                          \
    {                                           \
        (void)(lock); (void)(flags);            \
        Enable();                               \
    } while(0)

#define spin_lock_irq(x) spin_lock_irqsave(x, 0)
#define spin_unlock_irq(x) spin_unlock_irqrestore(x, 0)
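
/*
 * Usage sketch (illustrative, not part of the original header): the usual
 * spin_lock_irqsave()/spin_unlock_irqrestore() pairing as driver code would
 * use it with these wrappers. The lock and counter are made up for the example.
 */
#if 0
static spinlock_t example_lock;
static int example_counter;

static void example_bump_counter(void)
{
    unsigned long flags;

    spin_lock_irqsave(&example_lock, flags);    /* interrupts off on this CPU */
    example_counter++;
    spin_unlock_irqrestore(&example_lock, flags);
}
#endif
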
/* TODO: This may work incorrectly if write_lock and read_lock are used for the same lock, as
 * read_lock allows concurrent readers as long as there is no writer.
 */

static inline void rwlock_init(rwlock_t * lock)
{
    /* Nothing to set up for these wrappers */
}

static inline void write_lock(rwlock_t * lock)
{
    Forbid();
}

static inline void write_unlock(rwlock_t * lock)
{
    Permit();
}

/* Reference counted objects implementation */
static inline void kref_init(struct kref *kref)
{
    atomic_set(&kref->refcount, 1);
}

static inline void kref_get(struct kref *kref)
{
    atomic_inc(&kref->refcount);
}

static inline int kref_put(struct kref *kref, void (*release) (struct kref *kref))
{
    if (atomic_dec_and_test(&kref->refcount))
    {
        release(kref);
        return 1;
    }

    return 0;
}

static inline int kref_sub(struct kref *kref, unsigned int count, void (*release) (struct kref *kref))
{
    if (atomic_sub_and_test(count, &kref->refcount))
    {
        release(kref);
        return 1;
    }

    return 0;
}
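
/*
 * Usage sketch (illustrative, not part of the original header): a reference
 * counted object built on the kref wrappers above. The example_object type
 * and its release function are made up for the example; refcount is kept as
 * the first member so the release callback can recover the object with a
 * plain cast instead of relying on a container_of() macro.
 */
#if 0
struct example_object
{
    struct kref refcount;   /* must stay the first member for the cast below */
    ULONG payload;
};

static void example_object_release(struct kref *kref)
{
    /* refcount is the first member, so the kref pointer is the object pointer */
    struct example_object *obj = (struct example_object *)kref;
    HIDDNouveauFree(obj);
}

static struct example_object * example_object_create(void)
{
    struct example_object *obj = HIDDNouveauAlloc(sizeof(struct example_object));
    if (obj)
        kref_init(&obj->refcount);              /* reference count starts at 1 */
    return obj;
}

static void example_object_put(struct example_object *obj)
{
    /* drops one reference; example_object_release() runs when it hits zero */
    kref_put(&obj->refcount, example_object_release);
}
#endif
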
#define idr_pre_get(a, b) idr_pre_get_internal(a)
int idr_pre_get_internal(struct idr *idp);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void *idr_find(struct idr *idp, int id);
void idr_remove(struct idr *idp, int id);
void idr_init(struct idr *idp);
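
/*
 * Usage sketch (illustrative, not part of the original header): allocating,
 * looking up and releasing an id with the idr wrappers above, following the
 * two-step idr_pre_get()/idr_get_new_above() pattern. The function name and
 * the minimal error handling are only for the example.
 */
#if 0
static void example_idr_usage(struct idr *idp, void *object)
{
    int id;

    if (!idr_pre_get(idp, 0))                   /* pre-allocate idr memory */
        return;

    if (idr_get_new_above(idp, object, 1, &id)) /* allocate an id >= 1     */
        return;

    if (idr_find(idp, id) == object)            /* id -> pointer lookup    */
        idr_remove(idp, id);                    /* release the id again    */
}
#endif
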
struct agp_bridge_data *agp_backend_acquire(void * dev);
void agp_backend_release(struct agp_bridge_data * bridge);
struct agp_bridge_data * agp_find_bridge(void * dev);
int agp_copy_info(struct agp_bridge_data * bridge, struct agp_kern_info * info);
void agp_enable(struct agp_bridge_data * bridge, u32 mode);
struct agp_memory *agp_allocate_memory(struct agp_bridge_data * bridge, size_t num_pages, u32 type);
void agp_free_memory(struct agp_memory * mem);
int agp_bind_memory(struct agp_memory * mem, off_t offset);
int agp_unbind_memory(struct agp_memory * mem);
void agp_flush_chipset(struct agp_bridge_data * bridge);

/* io_mapping handling */
#define __copy_from_user_inatomic_nocache(to, from, size) copy_from_user(to, from, size)
#define io_mapping_map_atomic_wc(mapping, offset) (APTR)(mapping->address + (offset))
#define io_mapping_unmap_atomic(address)

static inline struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size)
{
    struct io_mapping * mapping = HIDDNouveauAlloc(sizeof(struct io_mapping));
    mapping->address = (IPTR)ioremap(base, size);
    return mapping;
}

static inline void io_mapping_free(struct io_mapping *mapping)
{
    iounmap((APTR)mapping->address);
    HIDDNouveauFree(mapping);
}
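
/*
 * Usage sketch (illustrative, not part of the original header): creating a
 * write-combined mapping of an aperture and writing one longword through it.
 * The aperture base, size, offset and pixel value are made up for the example.
 */
#if 0
static void example_poke_aperture(resource_size_t aperture_base, unsigned long aperture_size)
{
    struct io_mapping *fb = io_mapping_create_wc(aperture_base, aperture_size);
    APTR pixel;

    pixel = io_mapping_map_atomic_wc(fb, 0x1000);   /* map at byte offset 0x1000 */
    writel(0x00FF00FF, pixel);                      /* write one 32-bit value    */
    io_mapping_unmap_atomic(pixel);

    io_mapping_free(fb);
}
#endif
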
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
int i2c_del_adapter(struct i2c_adapter *);

/* jiffies (lame) handling */
#define jiffies get_jiffies()
unsigned long get_jiffies();

/* Wait queue (lame) handling */
#define init_waitqueue_head(x)
#define wake_up_all(x)

#define wait_event(wq, condition)                   \
    do                                              \
    {                                               \
        while (!(condition)) udelay(1000);          \
    } while(0)

#define wait_event_interruptible(wq, condition)     \
    ({                                              \
        while (!(condition)) udelay(1000);          \
        0;                                          \
    })

#define do_div(n,base) ({                               \
    unsigned long __res;                                \
    __res = ((unsigned long) n) % (unsigned) base;      \
    n = ((unsigned long) n) / (unsigned) base;          \
    __res;                                              \
})
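
/*
 * Usage sketch (illustrative, not part of the original header): do_div()
 * divides in place and hands back the remainder, mirroring the Linux macro.
 * The sample values are arbitrary.
 */
#if 0
static void example_do_div(void)
{
    unsigned long bytes = 1000003;
    unsigned long rem;

    rem = do_div(bytes, 4096);      /* bytes becomes 244, rem becomes 579 */
    (void)bytes; (void)rem;
}
#endif
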
unsigned int hweight32(unsigned int number);
unsigned int hweight8(unsigned int number);

#endif /* _DRM_COMPAT_FUNCS_ */