arch/c6x/include/asm/processor.h
/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *
 *  Updated for 2.6.34: Mark Salter <msalter@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASM_C6X_PROCESSOR_H
#define _ASM_C6X_PROCESSOR_H

#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/current.h>
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr()			\
({						\
	void *__pc;				\
	asm("mvc .S2 pce1,%0\n" : "=b"(__pc));	\
	__pc;					\
})
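/*
 * Illustrative sketch, not part of the original header: any kernel
 * code can sample its own program counter with current_text_addr(),
 * e.g. for ad-hoc tracing. trace_here() is a hypothetical wrapper.
 *
 *	static void trace_here(void)
 *	{
 *		void *pc = current_text_addr();
 *		pr_debug("executing near %p\n", pc);
 *	}
 */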
/*
 * User space process size. This is mostly meaningless for NOMMU
 * but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
 * Since calls like mmap() can return an address or an error, we
 * have to allow room for error returns when code does something
 * like:
 *
 *       addr = do_mmap(...)
 *       if ((unsigned long)addr >= TASK_SIZE)
 *            ... it's an error code, not an address ...
 *
 * Here, we allow for 4096 error codes which means we really can't
 * use the last 4K page on systems with RAM extending all the way
 * to the end of the 32-bit address space.
 */
#define TASK_SIZE	0xFFFFF000
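/*
 * Illustrative sketch, not part of the original header: with TASK_SIZE
 * at 0xFFFFF000, the top 4096 values of the address space double as
 * negative errno codes, so a return value can be classified like this
 * (the do_mmap() arguments are elided, use_mapping() is hypothetical):
 *
 *	unsigned long addr = do_mmap(...);
 *
 *	if (addr >= TASK_SIZE)
 *		return (long)addr;
 *	use_mapping(addr);
 */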
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's. We won't be using it.
 */
#define TASK_UNMAPPED_BASE	0
struct thread_struct {
	/* callee-saved register pairs, saved at context switch */
	unsigned long long b15_14;	/* B15 (stack pointer), B14 (data page pointer) */
	unsigned long long a15_14;
	unsigned long long b13_12;
	unsigned long long a13_12;
	unsigned long long b11_10;
	unsigned long long a11_10;
	unsigned long long ricl_icl;	/* RILC/ILC loop-control registers */

	unsigned long  usp;		/* user stack pointer */
	unsigned long  pc;		/* kernel pc */
	unsigned long  wchan;
};
#define INIT_THREAD					\
{							\
	.usp = 0,					\
	.wchan = 0,					\
}

#define INIT_MMAP { \
	&init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
	NULL, NULL }
#define task_pt_regs(task) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)
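/*
 * Illustrative sketch, not part of the original header: the saved
 * user-mode register frame sits at the very top of a task's kernel
 * stack, so task_pt_regs() points one struct pt_regs below
 * stack_page + THREAD_START_SP. For example, fetching a blocked
 * task's program counter (tsk is a hypothetical task pointer):
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long pc = regs->pc;
 */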
#define alloc_kernel_stack()	__get_free_page(GFP_KERNEL)
#define free_kernel_stack(page) free_page((page))
/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned int pc,
			 unsigned long usp);
/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
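/*
 * Illustrative sketch, not part of the original header: kernel_thread()
 * starts a kernel thread running fn(arg), with CLONE_* values in flags.
 * my_worker() and do_work() are hypothetical.
 *
 *	static int my_worker(void *arg)
 *	{
 *		do_work(arg);
 *		return 0;
 *	}
 *
 *	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */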
#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)
/*
 * saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
/*
 * saved kernel SP and DP of a blocked thread.
 */
#ifdef _BIG_ENDIAN
#define thread_saved_ksp(tsk) \
	(*(unsigned long *)&(tsk)->thread.b15_14)
#define thread_saved_dp(tsk) \
	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
#else
#define thread_saved_ksp(tsk) \
	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
#define thread_saved_dp(tsk) \
	(*(unsigned long *)&(tsk)->thread.b15_14)
#endif
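/*
 * Illustrative note, not part of the original header: the b15_14 slot
 * packs B15 (kernel SP) into the high 32 bits and B14 (DP) into the
 * low 32 bits. On big-endian the high word sits at the lower address,
 * on little-endian at the higher one, which is all the #ifdef above
 * selects for. Usage sketch (tsk is a hypothetical task pointer):
 *
 *	unsigned long ksp = thread_saved_ksp(tsk);
 *	unsigned long dp  = thread_saved_dp(tsk);
 */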
extern unsigned long get_wchan(struct task_struct *p);

#define	KSTK_EIP(task)	(task_pt_regs(task)->pc)
#define KSTK_ESP(task)	(task_pt_regs(task)->sp)
#define cpu_relax()		do { } while (0)

extern const struct seq_operations cpuinfo_op;

#endif /* _ASM_C6X_PROCESSOR_H */